X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fe1000%2Figb_ethdev.c;h=d0e2bc981420cd43d12347176358b50757db152f;hb=323263717774df318d8a6e64ac8bfe546e03b8f6;hp=e21a772fb0971addc1637186e10de66bdea56dee;hpb=6d13ea8e8e49ab957deae2bba5ecf4a4bfe747d1;p=dpdk.git diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c index e21a772fb0..d0e2bc9814 100644 --- a/drivers/net/e1000/igb_ethdev.c +++ b/drivers/net/e1000/igb_ethdev.c @@ -17,8 +17,8 @@ #include #include #include -#include -#include +#include +#include #include #include #include @@ -74,15 +74,15 @@ static int eth_igb_configure(struct rte_eth_dev *dev); static int eth_igb_start(struct rte_eth_dev *dev); -static void eth_igb_stop(struct rte_eth_dev *dev); +static int eth_igb_stop(struct rte_eth_dev *dev); static int eth_igb_dev_set_link_up(struct rte_eth_dev *dev); static int eth_igb_dev_set_link_down(struct rte_eth_dev *dev); -static void eth_igb_close(struct rte_eth_dev *dev); +static int eth_igb_close(struct rte_eth_dev *dev); static int eth_igb_reset(struct rte_eth_dev *dev); -static void eth_igb_promiscuous_enable(struct rte_eth_dev *dev); -static void eth_igb_promiscuous_disable(struct rte_eth_dev *dev); -static void eth_igb_allmulticast_enable(struct rte_eth_dev *dev); -static void eth_igb_allmulticast_disable(struct rte_eth_dev *dev); +static int eth_igb_promiscuous_enable(struct rte_eth_dev *dev); +static int eth_igb_promiscuous_disable(struct rte_eth_dev *dev); +static int eth_igb_allmulticast_enable(struct rte_eth_dev *dev); +static int eth_igb_allmulticast_disable(struct rte_eth_dev *dev); static int eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete); static int eth_igb_stats_get(struct rte_eth_dev *dev, @@ -96,16 +96,16 @@ static int eth_igb_xstats_get_names(struct rte_eth_dev *dev, struct rte_eth_xstat_name *xstats_names, unsigned int size); static int eth_igb_xstats_get_names_by_id(struct rte_eth_dev *dev, - struct rte_eth_xstat_name *xstats_names, const uint64_t *ids, + const uint64_t *ids, struct rte_eth_xstat_name *xstats_names, unsigned int limit); -static void eth_igb_stats_reset(struct rte_eth_dev *dev); -static void eth_igb_xstats_reset(struct rte_eth_dev *dev); +static int eth_igb_stats_reset(struct rte_eth_dev *dev); +static int eth_igb_xstats_reset(struct rte_eth_dev *dev); static int eth_igb_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size); -static void eth_igb_infos_get(struct rte_eth_dev *dev, +static int eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info); static const uint32_t *eth_igb_supported_ptypes_get(struct rte_eth_dev *dev); -static void eth_igbvf_infos_get(struct rte_eth_dev *dev, +static int eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info); static int eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf); @@ -154,12 +154,12 @@ static int eth_igb_default_mac_addr_set(struct rte_eth_dev *dev, static void igbvf_intr_disable(struct e1000_hw *hw); static int igbvf_dev_configure(struct rte_eth_dev *dev); static int igbvf_dev_start(struct rte_eth_dev *dev); -static void igbvf_dev_stop(struct rte_eth_dev *dev); -static void igbvf_dev_close(struct rte_eth_dev *dev); -static void igbvf_promiscuous_enable(struct rte_eth_dev *dev); -static void igbvf_promiscuous_disable(struct rte_eth_dev *dev); -static void igbvf_allmulticast_enable(struct rte_eth_dev *dev); -static void igbvf_allmulticast_disable(struct rte_eth_dev *dev); +static int 
igbvf_dev_stop(struct rte_eth_dev *dev); +static int igbvf_dev_close(struct rte_eth_dev *dev); +static int igbvf_promiscuous_enable(struct rte_eth_dev *dev); +static int igbvf_promiscuous_disable(struct rte_eth_dev *dev); +static int igbvf_allmulticast_enable(struct rte_eth_dev *dev); +static int igbvf_allmulticast_disable(struct rte_eth_dev *dev); static int eth_igbvf_link_update(struct e1000_hw *hw); static int eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats); @@ -168,7 +168,7 @@ static int eth_igbvf_xstats_get(struct rte_eth_dev *dev, static int eth_igbvf_xstats_get_names(struct rte_eth_dev *dev, struct rte_eth_xstat_name *xstats_names, unsigned limit); -static void eth_igbvf_stats_reset(struct rte_eth_dev *dev); +static int eth_igbvf_stats_reset(struct rte_eth_dev *dev); static int igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on); static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on); @@ -186,38 +186,16 @@ static int eth_igb_rss_reta_query(struct rte_eth_dev *dev, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size); -static int eth_igb_syn_filter_get(struct rte_eth_dev *dev, - struct rte_eth_syn_filter *filter); -static int eth_igb_syn_filter_handle(struct rte_eth_dev *dev, - enum rte_filter_op filter_op, - void *arg); static int igb_add_2tuple_filter(struct rte_eth_dev *dev, struct rte_eth_ntuple_filter *ntuple_filter); static int igb_remove_2tuple_filter(struct rte_eth_dev *dev, struct rte_eth_ntuple_filter *ntuple_filter); -static int eth_igb_get_flex_filter(struct rte_eth_dev *dev, - struct rte_eth_flex_filter *filter); -static int eth_igb_flex_filter_handle(struct rte_eth_dev *dev, - enum rte_filter_op filter_op, - void *arg); static int igb_add_5tuple_filter_82576(struct rte_eth_dev *dev, struct rte_eth_ntuple_filter *ntuple_filter); static int igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev, struct rte_eth_ntuple_filter *ntuple_filter); -static int igb_get_ntuple_filter(struct rte_eth_dev *dev, - struct rte_eth_ntuple_filter *filter); -static int igb_ntuple_filter_handle(struct rte_eth_dev *dev, - enum rte_filter_op filter_op, - void *arg); -static int igb_ethertype_filter_handle(struct rte_eth_dev *dev, - enum rte_filter_op filter_op, - void *arg); -static int igb_get_ethertype_filter(struct rte_eth_dev *dev, - struct rte_eth_ethertype_filter *filter); -static int eth_igb_filter_ctrl(struct rte_eth_dev *dev, - enum rte_filter_type filter_type, - enum rte_filter_op filter_op, - void *arg); +static int eth_igb_flow_ops_get(struct rte_eth_dev *dev, + const struct rte_flow_ops **ops); static int eth_igb_get_reg_length(struct rte_eth_dev *dev); static int eth_igb_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs); @@ -380,10 +358,6 @@ static const struct eth_dev_ops eth_igb_ops = { .rx_queue_intr_enable = eth_igb_rx_queue_intr_enable, .rx_queue_intr_disable = eth_igb_rx_queue_intr_disable, .rx_queue_release = eth_igb_rx_queue_release, - .rx_queue_count = eth_igb_rx_queue_count, - .rx_descriptor_done = eth_igb_rx_descriptor_done, - .rx_descriptor_status = eth_igb_rx_descriptor_status, - .tx_descriptor_status = eth_igb_tx_descriptor_status, .tx_queue_setup = eth_igb_tx_queue_setup, .tx_queue_release = eth_igb_tx_queue_release, .tx_done_cleanup = eth_igb_tx_done_cleanup, @@ -398,7 +372,7 @@ static const struct eth_dev_ops eth_igb_ops = { .reta_query = eth_igb_rss_reta_query, .rss_hash_update = eth_igb_rss_hash_update, .rss_hash_conf_get = eth_igb_rss_hash_conf_get, - .filter_ctrl = 
eth_igb_filter_ctrl, + .flow_ops_get = eth_igb_flow_ops_get, .set_mc_addr_list = eth_igb_set_mc_addr_list, .rxq_info_get = igb_rxq_info_get, .txq_info_get = igb_txq_info_get, @@ -441,11 +415,9 @@ static const struct eth_dev_ops igbvf_eth_dev_ops = { .dev_supported_ptypes_get = eth_igb_supported_ptypes_get, .rx_queue_setup = eth_igb_rx_queue_setup, .rx_queue_release = eth_igb_rx_queue_release, - .rx_descriptor_done = eth_igb_rx_descriptor_done, - .rx_descriptor_status = eth_igb_rx_descriptor_status, - .tx_descriptor_status = eth_igb_tx_descriptor_status, .tx_queue_setup = eth_igb_tx_queue_setup, .tx_queue_release = eth_igb_tx_queue_release, + .tx_done_cleanup = eth_igb_tx_done_cleanup, .set_mc_addr_list = eth_igb_set_mc_addr_list, .rxq_info_get = igb_rxq_info_get, .txq_info_get = igb_txq_info_get, @@ -543,7 +515,7 @@ igb_intr_enable(struct rte_eth_dev *dev) struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); - struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = pci_dev->intr_handle; if (rte_intr_allow_others(intr_handle) && dev->data->dev_conf.intr_conf.lsc != 0) { @@ -560,7 +532,7 @@ igb_intr_disable(struct rte_eth_dev *dev) struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); - struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = pci_dev->intr_handle; if (rte_intr_allow_others(intr_handle) && dev->data->dev_conf.intr_conf.lsc != 0) { @@ -753,6 +725,9 @@ eth_igb_dev_init(struct rte_eth_dev *eth_dev) uint32_t ctrl_ext; eth_dev->dev_ops = ð_igb_ops; + eth_dev->rx_queue_count = eth_igb_rx_queue_count; + eth_dev->rx_descriptor_status = eth_igb_rx_descriptor_status; + eth_dev->tx_descriptor_status = eth_igb_tx_descriptor_status; eth_dev->rx_pkt_burst = ð_igb_recv_pkts; eth_dev->tx_pkt_burst = ð_igb_xmit_pkts; eth_dev->tx_pkt_prepare = ð_igb_prep_pkts; @@ -830,17 +805,17 @@ eth_igb_dev_init(struct rte_eth_dev *eth_dev) /* Allocate memory for storing MAC addresses */ eth_dev->data->mac_addrs = rte_zmalloc("e1000", - ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0); + RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0); if (eth_dev->data->mac_addrs == NULL) { PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to " "store MAC addresses", - ETHER_ADDR_LEN * hw->mac.rar_entry_count); + RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count); error = -ENOMEM; goto err_late; } /* Copy the permanent MAC address */ - ether_addr_copy((struct rte_ether_addr *)hw->mac.addr, + rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr, ð_dev->data->mac_addrs[0]); /* initialize the vfta */ @@ -876,16 +851,18 @@ eth_igb_dev_init(struct rte_eth_dev *eth_dev) eth_dev->data->port_id, pci_dev->id.vendor_id, pci_dev->id.device_id); - rte_intr_callback_register(&pci_dev->intr_handle, + rte_intr_callback_register(pci_dev->intr_handle, eth_igb_interrupt_handler, (void *)eth_dev); /* enable uio/vfio intr/eventfd mapping */ - rte_intr_enable(&pci_dev->intr_handle); + rte_intr_enable(pci_dev->intr_handle); /* enable support intr */ igb_intr_enable(eth_dev); + eth_igb_dev_set_link_down(eth_dev); + /* initialize filter info */ memset(filter_info, 0, sizeof(struct e1000_filter_info)); @@ -912,61 +889,12 @@ err_late: static int eth_igb_dev_uninit(struct rte_eth_dev *eth_dev) { - struct rte_pci_device *pci_dev; - struct rte_intr_handle *intr_handle; - struct e1000_hw *hw; - struct e1000_adapter 
*adapter = - E1000_DEV_PRIVATE(eth_dev->data->dev_private); - struct e1000_filter_info *filter_info = - E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private); - PMD_INIT_FUNC_TRACE(); if (rte_eal_process_type() != RTE_PROC_PRIMARY) - return -EPERM; - - hw = E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); - pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); - intr_handle = &pci_dev->intr_handle; - - if (adapter->stopped == 0) - eth_igb_close(eth_dev); - - eth_dev->dev_ops = NULL; - eth_dev->rx_pkt_burst = NULL; - eth_dev->tx_pkt_burst = NULL; - - /* Reset any pending lock */ - igb_reset_swfw_lock(hw); - - /* uninitialize PF if max_vfs not zero */ - igb_pf_host_uninit(eth_dev); - - /* disable uio intr before callback unregister */ - rte_intr_disable(intr_handle); - rte_intr_callback_unregister(intr_handle, - eth_igb_interrupt_handler, eth_dev); - - /* clear the SYN filter info */ - filter_info->syn_info = 0; - - /* clear the ethertype filters info */ - filter_info->ethertype_mask = 0; - memset(filter_info->ethertype_filters, 0, - E1000_MAX_ETQF_FILTERS * sizeof(struct igb_ethertype_filter)); - - /* clear the rss filter info */ - memset(&filter_info->rss_info, 0, - sizeof(struct igb_rte_flow_rss_conf)); - - /* remove all ntuple filters of the device */ - igb_ntuple_filter_uninit(eth_dev); + return 0; - /* remove all flex filters of the device */ - igb_flex_filter_uninit(eth_dev); - - /* clear all the filters list */ - igb_filterlist_flush(eth_dev); + eth_igb_close(eth_dev); return 0; } @@ -990,6 +918,8 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev) PMD_INIT_FUNC_TRACE(); eth_dev->dev_ops = &igbvf_eth_dev_ops; + eth_dev->rx_descriptor_status = eth_igb_rx_descriptor_status; + eth_dev->tx_descriptor_status = eth_igb_tx_descriptor_status; eth_dev->rx_pkt_burst = ð_igb_recv_pkts; eth_dev->tx_pkt_burst = ð_igb_xmit_pkts; eth_dev->tx_pkt_prepare = ð_igb_prep_pkts; @@ -1028,28 +958,23 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev) diag = hw->mac.ops.reset_hw(hw); /* Allocate memory for storing MAC addresses */ - eth_dev->data->mac_addrs = rte_zmalloc("igbvf", ETHER_ADDR_LEN * + eth_dev->data->mac_addrs = rte_zmalloc("igbvf", RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0); if (eth_dev->data->mac_addrs == NULL) { PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to store MAC " "addresses", - ETHER_ADDR_LEN * hw->mac.rar_entry_count); + RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count); return -ENOMEM; } /* Generate a random MAC address, if none was assigned by PF. 
*/ - if (is_zero_ether_addr(perm_addr)) { - eth_random_addr(perm_addr->addr_bytes); + if (rte_is_zero_ether_addr(perm_addr)) { + rte_eth_random_addr(perm_addr->addr_bytes); PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF"); PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address " - "%02x:%02x:%02x:%02x:%02x:%02x", - perm_addr->addr_bytes[0], - perm_addr->addr_bytes[1], - perm_addr->addr_bytes[2], - perm_addr->addr_bytes[3], - perm_addr->addr_bytes[4], - perm_addr->addr_bytes[5]); + RTE_ETHER_ADDR_PRT_FMT, + RTE_ETHER_ADDR_BYTES(perm_addr)); } diag = e1000_rar_set(hw, perm_addr->addr_bytes, 0); @@ -1059,7 +984,7 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev) return diag; } /* Copy the permanent MAC address */ - ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr, + rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr, ð_dev->data->mac_addrs[0]); PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x " @@ -1067,7 +992,7 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev) eth_dev->data->port_id, pci_dev->id.vendor_id, pci_dev->id.device_id, "igb_mac_82576_vf"); - intr_handle = &pci_dev->intr_handle; + intr_handle = pci_dev->intr_handle; rte_intr_callback_register(intr_handle, eth_igbvf_interrupt_handler, eth_dev); @@ -1077,27 +1002,12 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev) static int eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev) { - struct e1000_adapter *adapter = - E1000_DEV_PRIVATE(eth_dev->data->dev_private); - struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); - PMD_INIT_FUNC_TRACE(); if (rte_eal_process_type() != RTE_PROC_PRIMARY) - return -EPERM; - - if (adapter->stopped == 0) - igbvf_dev_close(eth_dev); - - eth_dev->dev_ops = NULL; - eth_dev->rx_pkt_burst = NULL; - eth_dev->tx_pkt_burst = NULL; + return 0; - /* disable uio intr before callback unregister */ - rte_intr_disable(&pci_dev->intr_handle); - rte_intr_callback_unregister(&pci_dev->intr_handle, - eth_igbvf_interrupt_handler, - (void *)eth_dev); + igbvf_dev_close(eth_dev); return 0; } @@ -1116,8 +1026,7 @@ static int eth_igb_pci_remove(struct rte_pci_device *pci_dev) static struct rte_pci_driver rte_igb_pmd = { .id_table = pci_id_igb_map, - .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC | - RTE_PCI_DRV_IOVA_AS_VA, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, .probe = eth_igb_pci_probe, .remove = eth_igb_pci_remove, }; @@ -1140,7 +1049,7 @@ static int eth_igbvf_pci_remove(struct rte_pci_device *pci_dev) */ static struct rte_pci_driver rte_igbvf_pmd = { .id_table = pci_id_igbvf_map, - .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING, .probe = eth_igbvf_pci_probe, .remove = eth_igbvf_pci_remove, }; @@ -1164,21 +1073,21 @@ igb_check_mq_mode(struct rte_eth_dev *dev) uint16_t nb_rx_q = dev->data->nb_rx_queues; uint16_t nb_tx_q = dev->data->nb_tx_queues; - if ((rx_mq_mode & ETH_MQ_RX_DCB_FLAG) || - tx_mq_mode == ETH_MQ_TX_DCB || - tx_mq_mode == ETH_MQ_TX_VMDQ_DCB) { + if ((rx_mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) || + tx_mq_mode == RTE_ETH_MQ_TX_DCB || + tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) { PMD_INIT_LOG(ERR, "DCB mode is not supported."); return -EINVAL; } if (RTE_ETH_DEV_SRIOV(dev).active != 0) { /* Check multi-queue mode. - * To no break software we accept ETH_MQ_RX_NONE as this might + * To no break software we accept RTE_ETH_MQ_RX_NONE as this might * be used to turn off VLAN filter. 
*/ - if (rx_mq_mode == ETH_MQ_RX_NONE || - rx_mq_mode == ETH_MQ_RX_VMDQ_ONLY) { - dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY; + if (rx_mq_mode == RTE_ETH_MQ_RX_NONE || + rx_mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) { + dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY; RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1; } else { /* Only support one queue on VFs. @@ -1190,12 +1099,12 @@ igb_check_mq_mode(struct rte_eth_dev *dev) return -EINVAL; } /* TX mode is not used here, so mode might be ignored.*/ - if (tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) { + if (tx_mq_mode != RTE_ETH_MQ_TX_VMDQ_ONLY) { /* SRIOV only works in VMDq enable mode */ PMD_INIT_LOG(WARNING, "SRIOV is active," " TX mode %d is not supported. " " Driver will behave as %d mode.", - tx_mq_mode, ETH_MQ_TX_VMDQ_ONLY); + tx_mq_mode, RTE_ETH_MQ_TX_VMDQ_ONLY); } /* check valid queue number */ @@ -1208,17 +1117,17 @@ igb_check_mq_mode(struct rte_eth_dev *dev) /* To no break software that set invalid mode, only display * warning if invalid mode is used. */ - if (rx_mq_mode != ETH_MQ_RX_NONE && - rx_mq_mode != ETH_MQ_RX_VMDQ_ONLY && - rx_mq_mode != ETH_MQ_RX_RSS) { + if (rx_mq_mode != RTE_ETH_MQ_RX_NONE && + rx_mq_mode != RTE_ETH_MQ_RX_VMDQ_ONLY && + rx_mq_mode != RTE_ETH_MQ_RX_RSS) { /* RSS together with VMDq not supported*/ PMD_INIT_LOG(ERR, "RX mode %d is not supported.", rx_mq_mode); return -EINVAL; } - if (tx_mq_mode != ETH_MQ_TX_NONE && - tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) { + if (tx_mq_mode != RTE_ETH_MQ_TX_NONE && + tx_mq_mode != RTE_ETH_MQ_TX_VMDQ_ONLY) { PMD_INIT_LOG(WARNING, "TX mode %d is not supported." " Due to txmode is meaningless in this" " driver, just ignore.", @@ -1237,6 +1146,9 @@ eth_igb_configure(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); + if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) + dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; + /* multipe queue mode checking */ ret = igb_check_mq_mode(dev); if (ret != 0) { @@ -1284,7 +1196,7 @@ eth_igb_start(struct rte_eth_dev *dev) struct e1000_adapter *adapter = E1000_DEV_PRIVATE(dev->data->dev_private); struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); - struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = pci_dev->intr_handle; int ret, mask; uint32_t intr_vector = 0; uint32_t ctrl_ext; @@ -1322,7 +1234,8 @@ eth_igb_start(struct rte_eth_dev *dev) } adapter->stopped = 0; - E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN); + E1000_WRITE_REG(hw, E1000_VET, + RTE_ETHER_TYPE_VLAN << 16 | RTE_ETHER_TYPE_VLAN); ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); /* Set PF Reset Done bit so PF/VF Mail Ops can work */ @@ -1342,11 +1255,10 @@ eth_igb_start(struct rte_eth_dev *dev) return -1; } - if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { - intr_handle->intr_vec = - rte_zmalloc("intr_vec", - dev->data->nb_rx_queues * sizeof(int), 0); - if (intr_handle->intr_vec == NULL) { + /* Allocate the vector list */ + if (rte_intr_dp_is_en(intr_handle)) { + if (rte_intr_vec_list_alloc(intr_handle, "intr_vec", + dev->data->nb_rx_queues)) { PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" " intr_vec", dev->data->nb_rx_queues); return -ENOMEM; @@ -1374,8 +1286,8 @@ eth_igb_start(struct rte_eth_dev *dev) /* * VLAN Offload Settings */ - mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \ - ETH_VLAN_EXTEND_MASK; + mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK | + RTE_ETH_VLAN_EXTEND_MASK; ret = eth_igb_vlan_offload_set(dev, mask); if 
(ret) { PMD_INIT_LOG(ERR, "Unable to set vlan offload"); @@ -1383,7 +1295,7 @@ eth_igb_start(struct rte_eth_dev *dev) return ret; } - if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) { + if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) { /* Enable VLAN filter since VMDq always use VLAN filter */ igb_vmdq_vlan_hw_filter_enable(dev); } @@ -1397,39 +1309,39 @@ eth_igb_start(struct rte_eth_dev *dev) /* Setup link speed and duplex */ speeds = &dev->data->dev_conf.link_speeds; - if (*speeds == ETH_LINK_SPEED_AUTONEG) { + if (*speeds == RTE_ETH_LINK_SPEED_AUTONEG) { hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX; hw->mac.autoneg = 1; } else { num_speeds = 0; - autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0; + autoneg = (*speeds & RTE_ETH_LINK_SPEED_FIXED) == 0; /* Reset */ hw->phy.autoneg_advertised = 0; - if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M | - ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M | - ETH_LINK_SPEED_1G | ETH_LINK_SPEED_FIXED)) { + if (*speeds & ~(RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M | + RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M | + RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_FIXED)) { num_speeds = -1; goto error_invalid_config; } - if (*speeds & ETH_LINK_SPEED_10M_HD) { + if (*speeds & RTE_ETH_LINK_SPEED_10M_HD) { hw->phy.autoneg_advertised |= ADVERTISE_10_HALF; num_speeds++; } - if (*speeds & ETH_LINK_SPEED_10M) { + if (*speeds & RTE_ETH_LINK_SPEED_10M) { hw->phy.autoneg_advertised |= ADVERTISE_10_FULL; num_speeds++; } - if (*speeds & ETH_LINK_SPEED_100M_HD) { + if (*speeds & RTE_ETH_LINK_SPEED_100M_HD) { hw->phy.autoneg_advertised |= ADVERTISE_100_HALF; num_speeds++; } - if (*speeds & ETH_LINK_SPEED_100M) { + if (*speeds & RTE_ETH_LINK_SPEED_100M) { hw->phy.autoneg_advertised |= ADVERTISE_100_FULL; num_speeds++; } - if (*speeds & ETH_LINK_SPEED_1G) { + if (*speeds & RTE_ETH_LINK_SPEED_1G) { hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL; num_speeds++; } @@ -1499,13 +1411,18 @@ error_invalid_config: * global reset on the MAC. 
* **********************************************************************/ -static void +static int eth_igb_stop(struct rte_eth_dev *dev) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_eth_link link; - struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = pci_dev->intr_handle; + struct e1000_adapter *adapter = + E1000_DEV_PRIVATE(dev->data->dev_private); + + if (adapter->stopped) + return 0; eth_igb_rxtx_control(dev, false); @@ -1517,8 +1434,9 @@ eth_igb_stop(struct rte_eth_dev *dev) igb_pf_reset_hw(hw); E1000_WRITE_REG(hw, E1000_WUC, 0); - /* Set bit for Go Link disconnect */ - if (hw->mac.type >= e1000_82580) { + /* Set bit for Go Link disconnect if PHY reset is not blocked */ + if (hw->mac.type >= e1000_82580 && + (e1000_check_reset_block(hw) != E1000_BLK_PHY_RESET)) { uint32_t phpm_reg; phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT); @@ -1543,10 +1461,12 @@ eth_igb_stop(struct rte_eth_dev *dev) /* Clean datapath event and queue/vec mapping */ rte_intr_efd_disable(intr_handle); - if (intr_handle->intr_vec != NULL) { - rte_free(intr_handle->intr_vec); - intr_handle->intr_vec = NULL; - } + rte_intr_vec_list_free(intr_handle); + + adapter->stopped = true; + dev->data->dev_started = 0; + + return 0; } static int @@ -1575,25 +1495,29 @@ eth_igb_dev_set_link_down(struct rte_eth_dev *dev) return 0; } -static void +static int eth_igb_close(struct rte_eth_dev *dev) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct e1000_adapter *adapter = - E1000_DEV_PRIVATE(dev->data->dev_private); struct rte_eth_link link; struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); - struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = pci_dev->intr_handle; + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + int ret; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; - eth_igb_stop(dev); - adapter->stopped = 1; + ret = eth_igb_stop(dev); e1000_phy_hw_reset(hw); igb_release_manageability(hw); igb_hw_control_release(hw); - /* Clear bit for Go Link disconnect */ - if (hw->mac.type >= e1000_82580) { + /* Clear bit for Go Link disconnect if PHY reset is not blocked */ + if (hw->mac.type >= e1000_82580 && + (e1000_check_reset_block(hw) != E1000_BLK_PHY_RESET)) { uint32_t phpm_reg; phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT); @@ -1603,13 +1527,43 @@ eth_igb_close(struct rte_eth_dev *dev) igb_dev_free_queues(dev); - if (intr_handle->intr_vec) { - rte_free(intr_handle->intr_vec); - intr_handle->intr_vec = NULL; - } + /* Cleanup vector list */ + rte_intr_vec_list_free(intr_handle); memset(&link, 0, sizeof(link)); rte_eth_linkstatus_set(dev, &link); + + /* Reset any pending lock */ + igb_reset_swfw_lock(hw); + + /* uninitialize PF if max_vfs not zero */ + igb_pf_host_uninit(dev); + + rte_intr_callback_unregister(intr_handle, + eth_igb_interrupt_handler, dev); + + /* clear the SYN filter info */ + filter_info->syn_info = 0; + + /* clear the ethertype filters info */ + filter_info->ethertype_mask = 0; + memset(filter_info->ethertype_filters, 0, + E1000_MAX_ETQF_FILTERS * sizeof(struct igb_ethertype_filter)); + + /* clear the rss filter info */ + memset(&filter_info->rss_info, 0, + sizeof(struct igb_rte_flow_rss_conf)); + + /* remove all ntuple filters of the device */ + igb_ntuple_filter_uninit(dev); + + /* remove all 
flex filters of the device */ + igb_flex_filter_uninit(dev); + + /* clear all the filters list */ + igb_filterlist_flush(dev); + + return ret; } /* @@ -1689,7 +1643,7 @@ igb_hardware_init(struct e1000_hw *hw) */ rx_buf_size = igb_get_rx_buffer_size(hw); - hw->fc.high_water = rx_buf_size - (ETHER_MAX_LEN * 2); + hw->fc.high_water = rx_buf_size - (RTE_ETHER_MAX_LEN * 2); hw->fc.low_water = hw->fc.high_water - 1500; hw->fc.pause_time = IGB_FC_PAUSE_TIME; hw->fc.send_xon = 1; @@ -1708,7 +1662,8 @@ igb_hardware_init(struct e1000_hw *hw) if (diag < 0) return diag; - E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN); + E1000_WRITE_REG(hw, E1000_VET, + RTE_ETHER_TYPE_VLAN << 16 | RTE_ETHER_TYPE_VLAN); e1000_get_phy_info(hw); e1000_check_for_link(hw); @@ -1772,10 +1727,10 @@ igb_read_stats_registers(struct e1000_hw *hw, struct e1000_hw_stats *stats) /* Workaround CRC bytes included in size, take away 4 bytes/packet */ stats->gorc += E1000_READ_REG(hw, E1000_GORCL); stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32); - stats->gorc -= (stats->gprc - old_gprc) * ETHER_CRC_LEN; + stats->gorc -= (stats->gprc - old_gprc) * RTE_ETHER_CRC_LEN; stats->gotc += E1000_READ_REG(hw, E1000_GOTCL); stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32); - stats->gotc -= (stats->gptc - old_gptc) * ETHER_CRC_LEN; + stats->gotc -= (stats->gptc - old_gptc) * RTE_ETHER_CRC_LEN; stats->rnbc += E1000_READ_REG(hw, E1000_RNBC); stats->ruc += E1000_READ_REG(hw, E1000_RUC); @@ -1788,10 +1743,10 @@ igb_read_stats_registers(struct e1000_hw *hw, struct e1000_hw_stats *stats) stats->tor += E1000_READ_REG(hw, E1000_TORL); stats->tor += ((uint64_t)E1000_READ_REG(hw, E1000_TORH) << 32); - stats->tor -= (stats->tpr - old_tpr) * ETHER_CRC_LEN; + stats->tor -= (stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN; stats->tot += E1000_READ_REG(hw, E1000_TOTL); stats->tot += ((uint64_t)E1000_READ_REG(hw, E1000_TOTH) << 32); - stats->tot -= (stats->tpt - old_tpt) * ETHER_CRC_LEN; + stats->tot -= (stats->tpt - old_tpt) * RTE_ETHER_CRC_LEN; stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64); stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127); @@ -1825,10 +1780,10 @@ igb_read_stats_registers(struct e1000_hw *hw, struct e1000_hw_stats *stats) stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC); stats->hgorc += E1000_READ_REG(hw, E1000_HGORCL); stats->hgorc += ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32); - stats->hgorc -= (stats->rpthc - old_rpthc) * ETHER_CRC_LEN; + stats->hgorc -= (stats->rpthc - old_rpthc) * RTE_ETHER_CRC_LEN; stats->hgotc += E1000_READ_REG(hw, E1000_HGOTCL); stats->hgotc += ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32); - stats->hgotc -= (stats->hgptc - old_hgptc) * ETHER_CRC_LEN; + stats->hgotc -= (stats->hgptc - old_hgptc) * RTE_ETHER_CRC_LEN; stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS); stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC); stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC); @@ -1855,8 +1810,7 @@ eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats) /* Rx Errors */ rte_stats->imissed = stats->mpc; - rte_stats->ierrors = stats->crcerrs + - stats->rlec + stats->ruc + stats->roc + + rte_stats->ierrors = stats->crcerrs + stats->rlec + stats->rxerrc + stats->algnerrc + stats->cexterr; /* Tx Errors */ @@ -1869,7 +1823,7 @@ eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats) return 0; } -static void +static int eth_igb_stats_reset(struct rte_eth_dev *dev) { struct e1000_hw_stats *hw_stats = @@ -1880,9 +1834,11 @@ 
eth_igb_stats_reset(struct rte_eth_dev *dev) /* Reset software totals */ memset(hw_stats, 0, sizeof(*hw_stats)); + + return 0; } -static void +static int eth_igb_xstats_reset(struct rte_eth_dev *dev) { struct e1000_hw_stats *stats = @@ -1893,6 +1849,8 @@ eth_igb_xstats_reset(struct rte_eth_dev *dev) /* Reset software totals */ memset(stats, 0, sizeof(*stats)); + + return 0; } static int eth_igb_xstats_get_names(__rte_unused struct rte_eth_dev *dev, @@ -1915,7 +1873,7 @@ static int eth_igb_xstats_get_names(__rte_unused struct rte_eth_dev *dev, } static int eth_igb_xstats_get_names_by_id(struct rte_eth_dev *dev, - struct rte_eth_xstat_name *xstats_names, const uint64_t *ids, + const uint64_t *ids, struct rte_eth_xstat_name *xstats_names, unsigned int limit) { unsigned int i; @@ -1934,7 +1892,7 @@ static int eth_igb_xstats_get_names_by_id(struct rte_eth_dev *dev, } else { struct rte_eth_xstat_name xstats_names_copy[IGB_NB_XSTATS]; - eth_igb_xstats_get_names_by_id(dev, xstats_names_copy, NULL, + eth_igb_xstats_get_names_by_id(dev, NULL, xstats_names_copy, IGB_NB_XSTATS); for (i = 0; i < limit; i++) { @@ -2126,7 +2084,7 @@ eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats) return 0; } -static void +static int eth_igbvf_stats_reset(struct rte_eth_dev *dev) { struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats*) @@ -2138,6 +2096,8 @@ eth_igbvf_stats_reset(struct rte_eth_dev *dev) /* reset HW current stats*/ memset(&hw_stats->gprc, 0, sizeof(*hw_stats) - offsetof(struct e1000_vf_stats, gprc)); + + return 0; } static int @@ -2184,15 +2144,17 @@ eth_igb_fw_version_get(struct rte_eth_dev *dev, char *fw_version, } break; } + if (ret < 0) + return -EINVAL; ret += 1; /* add the size of '\0' */ - if (fw_size < (u32)ret) + if (fw_size < (size_t)ret) return ret; else return 0; } -static void +static int eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -2206,6 +2168,7 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev); dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) | dev_info->tx_queue_offload_capa; + dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP; switch (hw->mac.type) { case e1000_82575: @@ -2217,21 +2180,21 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) case e1000_82576: dev_info->max_rx_queues = 16; dev_info->max_tx_queues = 16; - dev_info->max_vmdq_pools = ETH_8_POOLS; + dev_info->max_vmdq_pools = RTE_ETH_8_POOLS; dev_info->vmdq_queue_num = 16; break; case e1000_82580: dev_info->max_rx_queues = 8; dev_info->max_tx_queues = 8; - dev_info->max_vmdq_pools = ETH_8_POOLS; + dev_info->max_vmdq_pools = RTE_ETH_8_POOLS; dev_info->vmdq_queue_num = 8; break; case e1000_i350: dev_info->max_rx_queues = 8; dev_info->max_tx_queues = 8; - dev_info->max_vmdq_pools = ETH_8_POOLS; + dev_info->max_vmdq_pools = RTE_ETH_8_POOLS; dev_info->vmdq_queue_num = 8; break; @@ -2254,10 +2217,10 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) default: /* Should not happen */ - break; + return -EINVAL; } dev_info->hash_key_size = IGB_HKEY_MAX_INDEX * sizeof(uint32_t); - dev_info->reta_size = ETH_RSS_RETA_SIZE_128; + dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128; dev_info->flow_type_rss_offloads = IGB_RSS_OFFLOAD_ALL; dev_info->default_rxconf = (struct rte_eth_rxconf) { @@ -2283,13 +2246,14 @@ 
eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->rx_desc_lim = rx_desc_lim; dev_info->tx_desc_lim = tx_desc_lim; - dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M | - ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M | - ETH_LINK_SPEED_1G; + dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M | + RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M | + RTE_ETH_LINK_SPEED_1G; dev_info->max_mtu = dev_info->max_rx_pktlen - E1000_ETH_OVERHEAD; - dev_info->min_mtu = ETHER_MIN_MTU; + dev_info->min_mtu = RTE_ETHER_MIN_MTU; + return 0; } static const uint32_t * @@ -2319,7 +2283,7 @@ eth_igb_supported_ptypes_get(struct rte_eth_dev *dev) return NULL; } -static void +static int eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -2327,12 +2291,12 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */ dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */ dev_info->max_mac_addrs = hw->mac.rar_entry_count; - dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | - DEV_TX_OFFLOAD_IPV4_CKSUM | - DEV_TX_OFFLOAD_UDP_CKSUM | - DEV_TX_OFFLOAD_TCP_CKSUM | - DEV_TX_OFFLOAD_SCTP_CKSUM | - DEV_TX_OFFLOAD_TCP_TSO; + dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT | + RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | + RTE_ETH_TX_OFFLOAD_UDP_CKSUM | + RTE_ETH_TX_OFFLOAD_TCP_CKSUM | + RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | + RTE_ETH_TX_OFFLOAD_TCP_TSO; switch (hw->mac.type) { case e1000_vfadapt: dev_info->max_rx_queues = 2; @@ -2344,7 +2308,7 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) break; default: /* Should not happen */ - break; + return -EINVAL; } dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev); @@ -2376,6 +2340,8 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->rx_desc_lim = rx_desc_lim; dev_info->tx_desc_lim = tx_desc_lim; + + return 0; } /* return 0 means link status changed, -1 means not changed */ @@ -2431,17 +2397,17 @@ eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete) uint16_t duplex, speed; hw->mac.ops.get_link_up_info(hw, &speed, &duplex); link.link_duplex = (duplex == FULL_DUPLEX) ? 
- ETH_LINK_FULL_DUPLEX : - ETH_LINK_HALF_DUPLEX; + RTE_ETH_LINK_FULL_DUPLEX : + RTE_ETH_LINK_HALF_DUPLEX; link.link_speed = speed; - link.link_status = ETH_LINK_UP; + link.link_status = RTE_ETH_LINK_UP; link.link_autoneg = !(dev->data->dev_conf.link_speeds & - ETH_LINK_SPEED_FIXED); + RTE_ETH_LINK_SPEED_FIXED); } else if (!link_check) { link.link_speed = 0; - link.link_duplex = ETH_LINK_HALF_DUPLEX; - link.link_status = ETH_LINK_DOWN; - link.link_autoneg = ETH_LINK_FIXED; + link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX; + link.link_status = RTE_ETH_LINK_DOWN; + link.link_autoneg = RTE_ETH_LINK_FIXED; } return rte_eth_linkstatus_set(dev, &link); @@ -2515,7 +2481,7 @@ igb_release_manageability(struct e1000_hw *hw) } } -static void +static int eth_igb_promiscuous_enable(struct rte_eth_dev *dev) { struct e1000_hw *hw = @@ -2525,9 +2491,11 @@ eth_igb_promiscuous_enable(struct rte_eth_dev *dev) rctl = E1000_READ_REG(hw, E1000_RCTL); rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); E1000_WRITE_REG(hw, E1000_RCTL, rctl); + + return 0; } -static void +static int eth_igb_promiscuous_disable(struct rte_eth_dev *dev) { struct e1000_hw *hw = @@ -2541,9 +2509,11 @@ eth_igb_promiscuous_disable(struct rte_eth_dev *dev) else rctl &= (~E1000_RCTL_MPE); E1000_WRITE_REG(hw, E1000_RCTL, rctl); + + return 0; } -static void +static int eth_igb_allmulticast_enable(struct rte_eth_dev *dev) { struct e1000_hw *hw = @@ -2553,9 +2523,11 @@ eth_igb_allmulticast_enable(struct rte_eth_dev *dev) rctl = E1000_READ_REG(hw, E1000_RCTL); rctl |= E1000_RCTL_MPE; E1000_WRITE_REG(hw, E1000_RCTL, rctl); + + return 0; } -static void +static int eth_igb_allmulticast_disable(struct rte_eth_dev *dev) { struct e1000_hw *hw = @@ -2563,10 +2535,12 @@ eth_igb_allmulticast_disable(struct rte_eth_dev *dev) uint32_t rctl; if (dev->data->promiscuous == 1) - return; /* must remain in all_multicast mode */ + return 0; /* must remain in all_multicast mode */ rctl = E1000_READ_REG(hw, E1000_RCTL); rctl &= (~E1000_RCTL_MPE); E1000_WRITE_REG(hw, E1000_RCTL, rctl); + + return 0; } static int @@ -2609,7 +2583,7 @@ eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, qinq &= E1000_CTRL_EXT_EXT_VLAN; /* only outer TPID of double VLAN can be configured*/ - if (qinq && vlan_type == ETH_VLAN_TYPE_OUTER) { + if (qinq && vlan_type == RTE_ETH_VLAN_TYPE_OUTER) { reg = E1000_READ_REG(hw, E1000_VET); reg = (reg & (~E1000_VET_VET_EXT)) | ((uint32_t)tpid << E1000_VET_VET_EXT_SHIFT); @@ -2698,10 +2672,7 @@ igb_vlan_hw_extend_disable(struct rte_eth_dev *dev) E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); /* Update maximum packet length */ - if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) - E1000_WRITE_REG(hw, E1000_RLPML, - dev->data->dev_conf.rxmode.max_rx_pkt_len + - VLAN_TAG_SIZE); + E1000_WRITE_REG(hw, E1000_RLPML, dev->data->mtu + E1000_ETH_OVERHEAD); } static void @@ -2717,10 +2688,8 @@ igb_vlan_hw_extend_enable(struct rte_eth_dev *dev) E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); /* Update maximum packet length */ - if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) - E1000_WRITE_REG(hw, E1000_RLPML, - dev->data->dev_conf.rxmode.max_rx_pkt_len + - 2 * VLAN_TAG_SIZE); + E1000_WRITE_REG(hw, E1000_RLPML, + dev->data->mtu + E1000_ETH_OVERHEAD + VLAN_TAG_SIZE); } static int @@ -2729,22 +2698,22 @@ eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask) struct rte_eth_rxmode *rxmode; rxmode = &dev->data->dev_conf.rxmode; - if(mask & ETH_VLAN_STRIP_MASK){ - if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) + if (mask & 
RTE_ETH_VLAN_STRIP_MASK) { + if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) igb_vlan_hw_strip_enable(dev); else igb_vlan_hw_strip_disable(dev); } - if(mask & ETH_VLAN_FILTER_MASK){ - if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) + if (mask & RTE_ETH_VLAN_FILTER_MASK) { + if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) igb_vlan_hw_filter_enable(dev); else igb_vlan_hw_filter_disable(dev); } - if(mask & ETH_VLAN_EXTEND_MASK){ - if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) + if (mask & RTE_ETH_VLAN_EXTEND_MASK) { + if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) igb_vlan_hw_extend_enable(dev); else igb_vlan_hw_extend_disable(dev); @@ -2793,15 +2762,18 @@ eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on) static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev) { uint32_t mask, regval; + int ret; struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); - struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = pci_dev->intr_handle; int misc_shift = rte_intr_allow_others(intr_handle) ? 1 : 0; struct rte_eth_dev_info dev_info; memset(&dev_info, 0, sizeof(dev_info)); - eth_igb_infos_get(dev, &dev_info); + ret = eth_igb_infos_get(dev, &dev_info); + if (ret != 0) + return ret; mask = (0xFFFFFFFF >> (32 - dev_info.max_rx_queues)) << misc_shift; regval = E1000_READ_REG(hw, E1000_EIMS); @@ -2874,7 +2846,7 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev, } igb_intr_enable(dev); - rte_intr_enable(intr_handle); + rte_intr_ack(intr_handle); if (intr->flags & E1000_FLAG_NEED_LINK_UPDATE) { intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE; @@ -2893,20 +2865,19 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev, " Port %d: Link Up - speed %u Mbps - %s", dev->data->port_id, (unsigned)link.link_speed, - link.link_duplex == ETH_LINK_FULL_DUPLEX ? + link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ? 
"full-duplex" : "half-duplex"); } else { PMD_INIT_LOG(INFO, " Port %d: Link Down", dev->data->port_id); } - PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d", + PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT, pci_dev->addr.domain, pci_dev->addr.bus, pci_dev->addr.devid, pci_dev->addr.function); - _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, - NULL); + rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); } return 0; @@ -2968,8 +2939,8 @@ void igbvf_mbx_process(struct rte_eth_dev *dev) /* dummy mbx read to ack pf */ if (mbx->ops.read(hw, &in_msg, 1, 0)) return; - _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, - NULL); + rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, + NULL); } } @@ -2985,7 +2956,7 @@ eth_igbvf_interrupt_action(struct rte_eth_dev *dev, struct rte_intr_handle *intr } igbvf_intr_enable(dev); - rte_intr_enable(intr_handle); + rte_intr_ack(intr_handle); return 0; } @@ -3048,13 +3019,13 @@ eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) rx_pause = 0; if (rx_pause && tx_pause) - fc_conf->mode = RTE_FC_FULL; + fc_conf->mode = RTE_ETH_FC_FULL; else if (rx_pause) - fc_conf->mode = RTE_FC_RX_PAUSE; + fc_conf->mode = RTE_ETH_FC_RX_PAUSE; else if (tx_pause) - fc_conf->mode = RTE_FC_TX_PAUSE; + fc_conf->mode = RTE_ETH_FC_TX_PAUSE; else - fc_conf->mode = RTE_FC_NONE; + fc_conf->mode = RTE_ETH_FC_NONE; return 0; } @@ -3073,6 +3044,7 @@ eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) uint32_t rx_buf_size; uint32_t max_high_water; uint32_t rctl; + uint32_t ctrl; hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); if (fc_conf->autoneg != hw->mac.autoneg) @@ -3081,7 +3053,7 @@ eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); /* At least reserve one Ethernet frame for watermark */ - max_high_water = rx_buf_size - ETHER_MAX_LEN; + max_high_water = rx_buf_size - RTE_ETHER_MAX_LEN; if ((fc_conf->high_water > max_high_water) || (fc_conf->high_water < fc_conf->low_water)) { PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value"); @@ -3110,6 +3082,39 @@ eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) rctl &= ~E1000_RCTL_PMCF; E1000_WRITE_REG(hw, E1000_RCTL, rctl); + + /* + * check if we want to change flow control mode - driver doesn't have native + * capability to do that, so we'll write the registers ourselves + */ + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + /* + * set or clear E1000_CTRL_RFCE and E1000_CTRL_TFCE bits depending + * on configuration + */ + switch (fc_conf->mode) { + case RTE_ETH_FC_NONE: + ctrl &= ~E1000_CTRL_RFCE & ~E1000_CTRL_TFCE; + break; + case RTE_ETH_FC_RX_PAUSE: + ctrl |= E1000_CTRL_RFCE; + ctrl &= ~E1000_CTRL_TFCE; + break; + case RTE_ETH_FC_TX_PAUSE: + ctrl |= E1000_CTRL_TFCE; + ctrl &= ~E1000_CTRL_RFCE; + break; + case RTE_ETH_FC_FULL: + ctrl |= E1000_CTRL_RFCE | E1000_CTRL_TFCE; + break; + default: + PMD_INIT_LOG(ERR, "invalid flow control mode"); + return -EINVAL; + } + + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + E1000_WRITE_FLUSH(hw); return 0; @@ -3137,7 +3142,7 @@ eth_igb_rar_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index) { - uint8_t addr[ETHER_ADDR_LEN]; + uint8_t addr[RTE_ETHER_ADDR_LEN]; struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); memset(addr, 0, sizeof(addr)); @@ -3175,9 +3180,12 @@ 
igbvf_stop_adapter(struct rte_eth_dev *dev) u16 i; struct rte_eth_dev_info dev_info; struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; memset(&dev_info, 0, sizeof(dev_info)); - eth_igbvf_infos_get(dev, &dev_info); + ret = eth_igbvf_infos_get(dev, &dev_info); + if (ret != 0) + return; /* Clear interrupt mask to stop from interrupts being generated */ igbvf_intr_disable(hw); @@ -3245,19 +3253,22 @@ igbvf_dev_configure(struct rte_eth_dev *dev) PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d", dev->data->port_id); + if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) + dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; + /* * VF has no ability to enable/disable HW CRC * Keep the persistent behavior the same as Host PF */ #ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC - if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) { + if (conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) { PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip"); - conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC; + conf->rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_KEEP_CRC; } #else - if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) { + if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)) { PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip"); - conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC; + conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC; } #endif @@ -3272,7 +3283,7 @@ igbvf_dev_start(struct rte_eth_dev *dev) struct e1000_adapter *adapter = E1000_DEV_PRIVATE(dev->data->dev_private); struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); - struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = pci_dev->intr_handle; int ret; uint32_t intr_vector = 0; @@ -3303,11 +3314,10 @@ igbvf_dev_start(struct rte_eth_dev *dev) return ret; } - if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { - intr_handle->intr_vec = - rte_zmalloc("intr_vec", - dev->data->nb_rx_queues * sizeof(int), 0); - if (!intr_handle->intr_vec) { + /* Allocate the vector list */ + if (rte_intr_dp_is_en(intr_handle)) { + if (rte_intr_vec_list_alloc(intr_handle, "intr_vec", + dev->data->nb_rx_queues)) { PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" " intr_vec", dev->data->nb_rx_queues); return -ENOMEM; @@ -3325,11 +3335,16 @@ igbvf_dev_start(struct rte_eth_dev *dev) return 0; } -static void +static int igbvf_dev_stop(struct rte_eth_dev *dev) { struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); - struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = pci_dev->intr_handle; + struct e1000_adapter *adapter = + E1000_DEV_PRIVATE(dev->data->dev_private); + + if (adapter->stopped) + return 0; PMD_INIT_FUNC_TRACE(); @@ -3348,26 +3363,35 @@ igbvf_dev_stop(struct rte_eth_dev *dev) /* Clean datapath event and queue/vec mapping */ rte_intr_efd_disable(intr_handle); - if (intr_handle->intr_vec) { - rte_free(intr_handle->intr_vec); - intr_handle->intr_vec = NULL; - } + + /* Clean vector list */ + rte_intr_vec_list_free(intr_handle); + + adapter->stopped = true; + dev->data->dev_started = 0; + + return 0; } -static void +static int igbvf_dev_close(struct rte_eth_dev *dev) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct e1000_adapter *adapter = - E1000_DEV_PRIVATE(dev->data->dev_private); struct rte_ether_addr addr; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + int ret; PMD_INIT_FUNC_TRACE(); + if 
(rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + e1000_reset_hw(hw); - igbvf_dev_stop(dev); - adapter->stopped = 1; + ret = igbvf_dev_stop(dev); + if (ret != 0) + return ret; + igb_dev_free_queues(dev); /** @@ -3378,18 +3402,26 @@ igbvf_dev_close(struct rte_eth_dev *dev) memset(&addr, 0, sizeof(addr)); igbvf_default_mac_addr_set(dev, &addr); + + rte_intr_callback_unregister(pci_dev->intr_handle, + eth_igbvf_interrupt_handler, + (void *)dev); + + return 0; } -static void +static int igbvf_promiscuous_enable(struct rte_eth_dev *dev) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); /* Set both unicast and multicast promisc */ e1000_promisc_set_vf(hw, e1000_promisc_enabled); + + return 0; } -static void +static int igbvf_promiscuous_disable(struct rte_eth_dev *dev) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -3399,9 +3431,11 @@ igbvf_promiscuous_disable(struct rte_eth_dev *dev) e1000_promisc_set_vf(hw, e1000_promisc_multicast); else e1000_promisc_set_vf(hw, e1000_promisc_disabled); + + return 0; } -static void +static int igbvf_allmulticast_enable(struct rte_eth_dev *dev) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -3409,9 +3443,11 @@ igbvf_allmulticast_enable(struct rte_eth_dev *dev) /* In promiscuous mode multicast promisc already set */ if (dev->data->promiscuous == 0) e1000_promisc_set_vf(hw, e1000_promisc_multicast); + + return 0; } -static void +static int igbvf_allmulticast_disable(struct rte_eth_dev *dev) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -3419,6 +3455,8 @@ igbvf_allmulticast_disable(struct rte_eth_dev *dev) /* In promiscuous mode leave multicast promisc enabled */ if (dev->data->promiscuous == 0) e1000_promisc_set_vf(hw, e1000_promisc_disabled); + + return 0; } static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on) @@ -3526,16 +3564,16 @@ eth_igb_rss_reta_update(struct rte_eth_dev *dev, uint16_t idx, shift; struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); - if (reta_size != ETH_RSS_RETA_SIZE_128) { + if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) { PMD_DRV_LOG(ERR, "The size of hash lookup table configured " "(%d) doesn't match the number hardware can supported " - "(%d)", reta_size, ETH_RSS_RETA_SIZE_128); + "(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128); return -EINVAL; } for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) { - idx = i / RTE_RETA_GROUP_SIZE; - shift = i % RTE_RETA_GROUP_SIZE; + idx = i / RTE_ETH_RETA_GROUP_SIZE; + shift = i % RTE_ETH_RETA_GROUP_SIZE; mask = (uint8_t)((reta_conf[idx].mask >> shift) & IGB_4_BIT_MASK); if (!mask) @@ -3567,16 +3605,16 @@ eth_igb_rss_reta_query(struct rte_eth_dev *dev, uint16_t idx, shift; struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); - if (reta_size != ETH_RSS_RETA_SIZE_128) { + if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) { PMD_DRV_LOG(ERR, "The size of hash lookup table configured " "(%d) doesn't match the number hardware can supported " - "(%d)", reta_size, ETH_RSS_RETA_SIZE_128); + "(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128); return -EINVAL; } for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) { - idx = i / RTE_RETA_GROUP_SIZE; - shift = i % RTE_RETA_GROUP_SIZE; + idx = i / RTE_ETH_RETA_GROUP_SIZE; + shift = i % RTE_ETH_RETA_GROUP_SIZE; mask = (uint8_t)((reta_conf[idx].mask >> shift) & IGB_4_BIT_MASK); if (!mask) @@ -3634,68 +3672,6 @@ eth_igb_syn_filter_set(struct rte_eth_dev *dev, return 0; } -static int -eth_igb_syn_filter_get(struct 
rte_eth_dev *dev, - struct rte_eth_syn_filter *filter) -{ - struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint32_t synqf, rfctl; - - synqf = E1000_READ_REG(hw, E1000_SYNQF(0)); - if (synqf & E1000_SYN_FILTER_ENABLE) { - rfctl = E1000_READ_REG(hw, E1000_RFCTL); - filter->hig_pri = (rfctl & E1000_RFCTL_SYNQFP) ? 1 : 0; - filter->queue = (uint8_t)((synqf & E1000_SYN_FILTER_QUEUE) >> - E1000_SYN_FILTER_QUEUE_SHIFT); - return 0; - } - - return -ENOENT; -} - -static int -eth_igb_syn_filter_handle(struct rte_eth_dev *dev, - enum rte_filter_op filter_op, - void *arg) -{ - struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); - int ret; - - MAC_TYPE_FILTER_SUP(hw->mac.type); - - if (filter_op == RTE_ETH_FILTER_NOP) - return 0; - - if (arg == NULL) { - PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u", - filter_op); - return -EINVAL; - } - - switch (filter_op) { - case RTE_ETH_FILTER_ADD: - ret = eth_igb_syn_filter_set(dev, - (struct rte_eth_syn_filter *)arg, - TRUE); - break; - case RTE_ETH_FILTER_DELETE: - ret = eth_igb_syn_filter_set(dev, - (struct rte_eth_syn_filter *)arg, - FALSE); - break; - case RTE_ETH_FILTER_GET: - ret = eth_igb_syn_filter_get(dev, - (struct rte_eth_syn_filter *)arg); - break; - default: - PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op); - ret = -EINVAL; - break; - } - - return ret; -} - /* translate elements in struct rte_eth_ntuple_filter to struct e1000_2tuple_filter_info*/ static inline int ntuple_filter_to_2tuple(struct rte_eth_ntuple_filter *filter, @@ -3705,7 +3681,7 @@ ntuple_filter_to_2tuple(struct rte_eth_ntuple_filter *filter, return -EINVAL; if (filter->priority > E1000_2TUPLE_MAX_PRI) return -EINVAL; /* filter index is out of range. */ - if (filter->tcp_flags > TCP_FLAG_ALL) + if (filter->tcp_flags > RTE_NTUPLE_TCP_FLAGS_MASK) return -EINVAL; /* flags is invalid. */ switch (filter->dst_port_mask) { @@ -3785,18 +3761,18 @@ igb_inject_2uple_filter(struct rte_eth_dev *dev, ttqf &= ~E1000_TTQF_MASK_ENABLE; /* tcp flags bits setting. 
*/ - if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) { - if (filter->filter_info.tcp_flags & TCP_URG_FLAG) + if (filter->filter_info.tcp_flags & RTE_NTUPLE_TCP_FLAGS_MASK) { + if (filter->filter_info.tcp_flags & RTE_TCP_URG_FLAG) imir_ext |= E1000_IMIREXT_CTRL_URG; - if (filter->filter_info.tcp_flags & TCP_ACK_FLAG) + if (filter->filter_info.tcp_flags & RTE_TCP_ACK_FLAG) imir_ext |= E1000_IMIREXT_CTRL_ACK; - if (filter->filter_info.tcp_flags & TCP_PSH_FLAG) + if (filter->filter_info.tcp_flags & RTE_TCP_PSH_FLAG) imir_ext |= E1000_IMIREXT_CTRL_PSH; - if (filter->filter_info.tcp_flags & TCP_RST_FLAG) + if (filter->filter_info.tcp_flags & RTE_TCP_RST_FLAG) imir_ext |= E1000_IMIREXT_CTRL_RST; - if (filter->filter_info.tcp_flags & TCP_SYN_FLAG) + if (filter->filter_info.tcp_flags & RTE_TCP_SYN_FLAG) imir_ext |= E1000_IMIREXT_CTRL_SYN; - if (filter->filter_info.tcp_flags & TCP_FIN_FLAG) + if (filter->filter_info.tcp_flags & RTE_TCP_FIN_FLAG) imir_ext |= E1000_IMIREXT_CTRL_FIN; } else { imir_ext |= E1000_IMIREXT_CTRL_BP; @@ -4014,7 +3990,7 @@ igb_remove_flex_filter(struct rte_eth_dev *dev, int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev, - struct rte_eth_flex_filter *filter, + struct igb_flex_filter *filter, bool add) { struct e1000_filter_info *filter_info = @@ -4086,102 +4062,6 @@ eth_igb_add_del_flex_filter(struct rte_eth_dev *dev, return 0; } -static int -eth_igb_get_flex_filter(struct rte_eth_dev *dev, - struct rte_eth_flex_filter *filter) -{ - struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct e1000_filter_info *filter_info = - E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); - struct e1000_flex_filter flex_filter, *it; - uint32_t wufc, queueing, wufc_en = 0; - - memset(&flex_filter, 0, sizeof(struct e1000_flex_filter)); - flex_filter.filter_info.len = filter->len; - flex_filter.filter_info.priority = filter->priority; - memcpy(flex_filter.filter_info.dwords, filter->bytes, filter->len); - memcpy(flex_filter.filter_info.mask, filter->mask, - RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT); - - it = eth_igb_flex_filter_lookup(&filter_info->flex_list, - &flex_filter.filter_info); - if (it == NULL) { - PMD_DRV_LOG(ERR, "filter doesn't exist."); - return -ENOENT; - } - - wufc = E1000_READ_REG(hw, E1000_WUFC); - wufc_en = E1000_WUFC_FLEX_HQ | (E1000_WUFC_FLX0 << it->index); - - if ((wufc & wufc_en) == wufc_en) { - uint32_t reg_off = 0; - if (it->index < E1000_MAX_FHFT) - reg_off = E1000_FHFT(it->index); - else - reg_off = E1000_FHFT_EXT(it->index - E1000_MAX_FHFT); - - queueing = E1000_READ_REG(hw, - reg_off + E1000_FHFT_QUEUEING_OFFSET); - filter->len = queueing & E1000_FHFT_QUEUEING_LEN; - filter->priority = (queueing & E1000_FHFT_QUEUEING_PRIO) >> - E1000_FHFT_QUEUEING_PRIO_SHIFT; - filter->queue = (queueing & E1000_FHFT_QUEUEING_QUEUE) >> - E1000_FHFT_QUEUEING_QUEUE_SHIFT; - return 0; - } - return -ENOENT; -} - -static int -eth_igb_flex_filter_handle(struct rte_eth_dev *dev, - enum rte_filter_op filter_op, - void *arg) -{ - struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct rte_eth_flex_filter *filter; - int ret = 0; - - MAC_TYPE_FILTER_SUP_EXT(hw->mac.type); - - if (filter_op == RTE_ETH_FILTER_NOP) - return ret; - - if (arg == NULL) { - PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u", - filter_op); - return -EINVAL; - } - - filter = (struct rte_eth_flex_filter *)arg; - if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN - || filter->len % sizeof(uint64_t) != 0) { - PMD_DRV_LOG(ERR, "filter's 
@@ -4086,102 +4062,6 @@ eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
 	return 0;
 }
 
-static int
-eth_igb_get_flex_filter(struct rte_eth_dev *dev,
-			struct rte_eth_flex_filter *filter)
-{
-	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct e1000_filter_info *filter_info =
-		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
-	struct e1000_flex_filter flex_filter, *it;
-	uint32_t wufc, queueing, wufc_en = 0;
-
-	memset(&flex_filter, 0, sizeof(struct e1000_flex_filter));
-	flex_filter.filter_info.len = filter->len;
-	flex_filter.filter_info.priority = filter->priority;
-	memcpy(flex_filter.filter_info.dwords, filter->bytes, filter->len);
-	memcpy(flex_filter.filter_info.mask, filter->mask,
-			RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT);
-
-	it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
-			&flex_filter.filter_info);
-	if (it == NULL) {
-		PMD_DRV_LOG(ERR, "filter doesn't exist.");
-		return -ENOENT;
-	}
-
-	wufc = E1000_READ_REG(hw, E1000_WUFC);
-	wufc_en = E1000_WUFC_FLEX_HQ | (E1000_WUFC_FLX0 << it->index);
-
-	if ((wufc & wufc_en) == wufc_en) {
-		uint32_t reg_off = 0;
-		if (it->index < E1000_MAX_FHFT)
-			reg_off = E1000_FHFT(it->index);
-		else
-			reg_off = E1000_FHFT_EXT(it->index - E1000_MAX_FHFT);
-
-		queueing = E1000_READ_REG(hw,
-				reg_off + E1000_FHFT_QUEUEING_OFFSET);
-		filter->len = queueing & E1000_FHFT_QUEUEING_LEN;
-		filter->priority = (queueing & E1000_FHFT_QUEUEING_PRIO) >>
-			E1000_FHFT_QUEUEING_PRIO_SHIFT;
-		filter->queue = (queueing & E1000_FHFT_QUEUEING_QUEUE) >>
-			E1000_FHFT_QUEUEING_QUEUE_SHIFT;
-		return 0;
-	}
-	return -ENOENT;
-}
-
-static int
-eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
-			enum rte_filter_op filter_op,
-			void *arg)
-{
-	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct rte_eth_flex_filter *filter;
-	int ret = 0;
-
-	MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
-
-	if (filter_op == RTE_ETH_FILTER_NOP)
-		return ret;
-
-	if (arg == NULL) {
-		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
-			    filter_op);
-		return -EINVAL;
-	}
-
-	filter = (struct rte_eth_flex_filter *)arg;
-	if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN
-	    || filter->len % sizeof(uint64_t) != 0) {
-		PMD_DRV_LOG(ERR, "filter's length is out of range");
-		return -EINVAL;
-	}
-	if (filter->priority > E1000_MAX_FLEX_FILTER_PRI) {
-		PMD_DRV_LOG(ERR, "filter's priority is out of range");
-		return -EINVAL;
-	}
-
-	switch (filter_op) {
-	case RTE_ETH_FILTER_ADD:
-		ret = eth_igb_add_del_flex_filter(dev, filter, TRUE);
-		break;
-	case RTE_ETH_FILTER_DELETE:
-		ret = eth_igb_add_del_flex_filter(dev, filter, FALSE);
-		break;
-	case RTE_ETH_FILTER_GET:
-		ret = eth_igb_get_flex_filter(dev, filter);
-		break;
-	default:
-		PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
-		ret = -EINVAL;
-		break;
-	}
-
-	return ret;
-}
-
 /* translate elements in struct rte_eth_ntuple_filter to struct e1000_5tuple_filter_info*/
 static inline int
 ntuple_filter_to_5tuple_82576(struct rte_eth_ntuple_filter *filter,
@@ -4191,7 +4071,7 @@ ntuple_filter_to_5tuple_82576(struct rte_eth_ntuple_filter *filter,
 		return -EINVAL;
 	if (filter->priority > E1000_2TUPLE_MAX_PRI)
 		return -EINVAL;  /* filter index is out of range. */
-	if (filter->tcp_flags > TCP_FLAG_ALL)
+	if (filter->tcp_flags > RTE_NTUPLE_TCP_FLAGS_MASK)
 		return -EINVAL;  /* flags is invalid. */
 
 	switch (filter->dst_ip_mask) {
@@ -4321,18 +4201,18 @@ igb_inject_5tuple_filter_82576(struct rte_eth_dev *dev,
 	imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
 
 	/* tcp flags bits setting. */
-	if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
-		if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
+	if (filter->filter_info.tcp_flags & RTE_NTUPLE_TCP_FLAGS_MASK) {
+		if (filter->filter_info.tcp_flags & RTE_TCP_URG_FLAG)
 			imir_ext |= E1000_IMIREXT_CTRL_URG;
-		if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
+		if (filter->filter_info.tcp_flags & RTE_TCP_ACK_FLAG)
 			imir_ext |= E1000_IMIREXT_CTRL_ACK;
-		if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
+		if (filter->filter_info.tcp_flags & RTE_TCP_PSH_FLAG)
 			imir_ext |= E1000_IMIREXT_CTRL_PSH;
-		if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
+		if (filter->filter_info.tcp_flags & RTE_TCP_RST_FLAG)
 			imir_ext |= E1000_IMIREXT_CTRL_RST;
-		if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
+		if (filter->filter_info.tcp_flags & RTE_TCP_SYN_FLAG)
 			imir_ext |= E1000_IMIREXT_CTRL_SYN;
-		if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
+		if (filter->filter_info.tcp_flags & RTE_TCP_FIN_FLAG)
 			imir_ext |= E1000_IMIREXT_CTRL_FIN;
 	} else {
 		imir_ext |= E1000_IMIREXT_CTRL_BP;
@@ -4472,7 +4352,6 @@ eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 {
 	uint32_t rctl;
 	struct e1000_hw *hw;
-	struct rte_eth_dev_info dev_info;
 	uint32_t frame_size = mtu + E1000_ETH_OVERHEAD;
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -4482,38 +4361,26 @@ eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	if (hw->mac.type == e1000_82571)
 		return -ENOTSUP;
 #endif
-	eth_igb_infos_get(dev, &dev_info);
-
-	/* check that mtu is within the allowed range */
-	if ((mtu < ETHER_MIN_MTU) ||
-	    (frame_size > dev_info.max_rx_pktlen))
-		return -EINVAL;
-
-	/* refuse mtu that requires the support of scattered packets when this
-	 * feature has not been enabled before. */
-	if (!dev->data->scattered_rx &&
-	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
+	/*
+	 * If device is started, refuse mtu that requires the support of
+	 * scattered packets when this feature has not been enabled before.
+	 */
+	if (dev->data->dev_started && !dev->data->scattered_rx &&
+	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
+		PMD_INIT_LOG(ERR, "Stop port first.");
 		return -EINVAL;
+	}
 
 	rctl = E1000_READ_REG(hw, E1000_RCTL);
 
 	/* switch to jumbo mode if needed */
-	if (frame_size > ETHER_MAX_LEN) {
-		dev->data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_JUMBO_FRAME;
+	if (mtu > RTE_ETHER_MTU)
 		rctl |= E1000_RCTL_LPE;
-	} else {
-		dev->data->dev_conf.rxmode.offloads &=
-			~DEV_RX_OFFLOAD_JUMBO_FRAME;
+	else
 		rctl &= ~E1000_RCTL_LPE;
-	}
 	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
 
-	/* update max frame size */
-	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-
-	E1000_WRITE_REG(hw, E1000_RLPML,
-		dev->data->dev_conf.rxmode.max_rx_pkt_len);
+	E1000_WRITE_REG(hw, E1000_RLPML, frame_size);
 
 	return 0;
 }
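Note: the reworked MTU path above drops the max_rx_pkt_len bookkeeping and programs E1000_RLPML directly from mtu + E1000_ETH_OVERHEAD; an MTU that would need scattered RX is refused only while the port is started. A usage sketch under that assumption; the port id, MTU value, and helper name are illustrative:

#include <rte_ethdev.h>

/* Sketch: change the MTU with the port stopped so the PMD never has to
 * refuse a frame size that would require scattered RX. */
static int
set_jumbo_mtu(uint16_t port_id, uint16_t mtu)
{
	int ret;

	ret = rte_eth_dev_stop(port_id);	/* returns int in this era */
	if (ret != 0)
		return ret;
	ret = rte_eth_dev_set_mtu(port_id, mtu);	/* e.g. 9000 */
	if (ret != 0)
		return ret;
	return rte_eth_dev_start(port_id);
}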
@@ -4569,126 +4436,6 @@ igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
 	return ret;
 }
 
-/*
- * igb_get_ntuple_filter - get a ntuple filter
- *
- * @param
- * dev: Pointer to struct rte_eth_dev.
- * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
- *
- * @return
- *    - On success, zero.
- *    - On failure, a negative value.
- */
-static int
-igb_get_ntuple_filter(struct rte_eth_dev *dev,
-			struct rte_eth_ntuple_filter *ntuple_filter)
-{
-	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct e1000_filter_info *filter_info =
-		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
-	struct e1000_5tuple_filter_info filter_5tuple;
-	struct e1000_2tuple_filter_info filter_2tuple;
-	struct e1000_5tuple_filter *p_5tuple_filter;
-	struct e1000_2tuple_filter *p_2tuple_filter;
-	int ret;
-
-	switch (ntuple_filter->flags) {
-	case RTE_5TUPLE_FLAGS:
-	case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
-		if (hw->mac.type != e1000_82576)
-			return -ENOTSUP;
-		memset(&filter_5tuple,
-			0,
-			sizeof(struct e1000_5tuple_filter_info));
-		ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
-						    &filter_5tuple);
-		if (ret < 0)
-			return ret;
-		p_5tuple_filter = igb_5tuple_filter_lookup_82576(
-					&filter_info->fivetuple_list,
-					&filter_5tuple);
-		if (p_5tuple_filter == NULL) {
-			PMD_DRV_LOG(ERR, "filter doesn't exist.");
-			return -ENOENT;
-		}
-		ntuple_filter->queue = p_5tuple_filter->queue;
-		break;
-	case RTE_2TUPLE_FLAGS:
-	case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
-		if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350)
-			return -ENOTSUP;
-		memset(&filter_2tuple,
-			0,
-			sizeof(struct e1000_2tuple_filter_info));
-		ret = ntuple_filter_to_2tuple(ntuple_filter, &filter_2tuple);
-		if (ret < 0)
-			return ret;
-		p_2tuple_filter = igb_2tuple_filter_lookup(
-					&filter_info->twotuple_list,
-					&filter_2tuple);
-		if (p_2tuple_filter == NULL) {
-			PMD_DRV_LOG(ERR, "filter doesn't exist.");
-			return -ENOENT;
-		}
-		ntuple_filter->queue = p_2tuple_filter->queue;
-		break;
-	default:
-		ret = -EINVAL;
-		break;
-	}
-
-	return 0;
-}
-
-/*
- * igb_ntuple_filter_handle - Handle operations for ntuple filter.
- * @dev: pointer to rte_eth_dev structure
- * @filter_op:operation will be taken.
- * @arg: a pointer to specific structure corresponding to the filter_op
- */
-static int
-igb_ntuple_filter_handle(struct rte_eth_dev *dev,
-			enum rte_filter_op filter_op,
-			void *arg)
-{
-	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	int ret;
-
-	MAC_TYPE_FILTER_SUP(hw->mac.type);
-
-	if (filter_op == RTE_ETH_FILTER_NOP)
-		return 0;
-
-	if (arg == NULL) {
-		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
-			    filter_op);
-		return -EINVAL;
-	}
-
-	switch (filter_op) {
-	case RTE_ETH_FILTER_ADD:
-		ret = igb_add_del_ntuple_filter(dev,
-			(struct rte_eth_ntuple_filter *)arg,
-			TRUE);
-		break;
-	case RTE_ETH_FILTER_DELETE:
-		ret = igb_add_del_ntuple_filter(dev,
-			(struct rte_eth_ntuple_filter *)arg,
-			FALSE);
-		break;
-	case RTE_ETH_FILTER_GET:
-		ret = igb_get_ntuple_filter(dev,
-			(struct rte_eth_ntuple_filter *)arg);
-		break;
-	default:
-		PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
-		ret = -EINVAL;
-		break;
-	}
-	return ret;
-}
-
 static inline int
 igb_ethertype_filter_lookup(struct e1000_filter_info *filter_info,
 			uint16_t ethertype)
@@ -4744,8 +4491,8 @@ igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
 	uint32_t etqf = 0;
 	int ret;
 
-	if (filter->ether_type == ETHER_TYPE_IPv4 ||
-	    filter->ether_type == ETHER_TYPE_IPv6) {
+	if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
+	    filter->ether_type == RTE_ETHER_TYPE_IPV6) {
 		PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
 			" ethertype filter.", filter->ether_type);
 		return -EINVAL;
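Note: beyond the RTE_ETHER_TYPE_* renames, the get/handle wrappers for ethertype filters are dropped in the next hunk; equivalent steering goes through rte_flow with an ETH item. A hedged sketch; the 0x88F7 ethertype, queue, and helper name are illustrative, and IPv4/IPv6 ethertypes remain rejected by the PMD as shown above:

#include <rte_byteorder.h>
#include <rte_flow.h>

/* Sketch: match a specific Ethernet type field and steer it to a queue. */
static struct rte_flow *
steer_ethertype(uint16_t port_id, uint16_t ether_type, uint16_t queue)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_eth eth_spec = {
		.type = rte_cpu_to_be_16(ether_type),	/* e.g. 0x88F7 */
	};
	struct rte_flow_item_eth eth_mask = {
		.type = RTE_BE16(0xffff),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
		  .spec = &eth_spec, .mask = &eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue q = { .index = queue };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &q },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}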
@@ -4794,115 +4541,11 @@ igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
 }
 
 static int
-igb_get_ethertype_filter(struct rte_eth_dev *dev,
-			struct rte_eth_ethertype_filter *filter)
-{
-	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct e1000_filter_info *filter_info =
-		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
-	uint32_t etqf;
-	int ret;
-
-	ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type);
-	if (ret < 0) {
-		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
-			    filter->ether_type);
-		return -ENOENT;
-	}
-
-	etqf = E1000_READ_REG(hw, E1000_ETQF(ret));
-	if (etqf & E1000_ETQF_FILTER_ENABLE) {
-		filter->ether_type = etqf & E1000_ETQF_ETHERTYPE;
-		filter->flags = 0;
-		filter->queue = (etqf & E1000_ETQF_QUEUE) >>
-				E1000_ETQF_QUEUE_SHIFT;
-		return 0;
-	}
-
-	return -ENOENT;
-}
-
-/*
- * igb_ethertype_filter_handle - Handle operations for ethertype filter.
- * @dev: pointer to rte_eth_dev structure
- * @filter_op:operation will be taken.
- * @arg: a pointer to specific structure corresponding to the filter_op
- */
-static int
-igb_ethertype_filter_handle(struct rte_eth_dev *dev,
-			enum rte_filter_op filter_op,
-			void *arg)
-{
-	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	int ret;
-
-	MAC_TYPE_FILTER_SUP(hw->mac.type);
-
-	if (filter_op == RTE_ETH_FILTER_NOP)
-		return 0;
-
-	if (arg == NULL) {
-		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
-			    filter_op);
-		return -EINVAL;
-	}
-
-	switch (filter_op) {
-	case RTE_ETH_FILTER_ADD:
-		ret = igb_add_del_ethertype_filter(dev,
-			(struct rte_eth_ethertype_filter *)arg,
-			TRUE);
-		break;
-	case RTE_ETH_FILTER_DELETE:
-		ret = igb_add_del_ethertype_filter(dev,
-			(struct rte_eth_ethertype_filter *)arg,
-			FALSE);
-		break;
-	case RTE_ETH_FILTER_GET:
-		ret = igb_get_ethertype_filter(dev,
-			(struct rte_eth_ethertype_filter *)arg);
-		break;
-	default:
-		PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
-		ret = -EINVAL;
-		break;
-	}
-	return ret;
-}
-
-static int
-eth_igb_filter_ctrl(struct rte_eth_dev *dev,
-		     enum rte_filter_type filter_type,
-		     enum rte_filter_op filter_op,
-		     void *arg)
+eth_igb_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
+		     const struct rte_flow_ops **ops)
 {
-	int ret = 0;
-
-	switch (filter_type) {
-	case RTE_ETH_FILTER_NTUPLE:
-		ret = igb_ntuple_filter_handle(dev, filter_op, arg);
-		break;
-	case RTE_ETH_FILTER_ETHERTYPE:
-		ret = igb_ethertype_filter_handle(dev, filter_op, arg);
-		break;
-	case RTE_ETH_FILTER_SYN:
-		ret = eth_igb_syn_filter_handle(dev, filter_op, arg);
-		break;
-	case RTE_ETH_FILTER_FLEXIBLE:
-		ret = eth_igb_flex_filter_handle(dev, filter_op, arg);
-		break;
-	case RTE_ETH_FILTER_GENERIC:
-		if (filter_op != RTE_ETH_FILTER_GET)
-			return -EINVAL;
-		*(const void **)arg = &igb_flow_ops;
-		break;
-	default:
-		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
-			    filter_type);
-		break;
-	}
-
-	return ret;
+	*ops = &igb_flow_ops;
+	return 0;
 }
 
 static int
@@ -5027,8 +4670,7 @@ static void
 igb_start_timecounters(struct rte_eth_dev *dev)
 {
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct e1000_adapter *adapter =
-		(struct e1000_adapter *)dev->data->dev_private;
+	struct e1000_adapter *adapter = dev->data->dev_private;
 	uint32_t incval = 1;
 	uint32_t shift = 0;
 	uint64_t mask = E1000_CYCLECOUNTER_MASK;
@@ -5079,8 +4721,7 @@ igb_start_timecounters(struct rte_eth_dev *dev)
 static int
 igb_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
 {
-	struct e1000_adapter *adapter =
-		(struct e1000_adapter *)dev->data->dev_private;
+	struct e1000_adapter *adapter = dev->data->dev_private;
 
 	adapter->systime_tc.nsec += delta;
 	adapter->rx_tstamp_tc.nsec += delta;
@@ -5093,8 +4734,7 @@ static int
 igb_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
 {
 	uint64_t ns;
-	struct e1000_adapter *adapter =
-		(struct e1000_adapter *)dev->data->dev_private;
+	struct e1000_adapter *adapter = dev->data->dev_private;
 
 	ns = rte_timespec_to_ns(ts);
 
@@ -5110,8 +4750,7 @@ static int
 igb_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
 {
 	uint64_t ns, systime_cycles;
-	struct e1000_adapter *adapter =
-		(struct e1000_adapter *)dev->data->dev_private;
+	struct e1000_adapter *adapter = dev->data->dev_private;
 
 	systime_cycles = igb_read_systime_cyclecounter(dev);
 	ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
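Note: the timecounter helpers in the hunks above back the public ethdev timesync API, and the casts they drop were redundant since dev_private is a void pointer. A short usage sketch of that public surface; the port id, the 1 us adjustment, and the helper name are illustrative:

#include <time.h>
#include <rte_ethdev.h>

/* Sketch: enable timesync, nudge the PMD-maintained clock, read it back. */
static int
nudge_clock(uint16_t port_id)
{
	struct timespec ts;
	int ret;

	ret = rte_eth_timesync_enable(port_id);
	if (ret != 0)
		return ret;
	ret = rte_eth_timesync_adjust_time(port_id, 1000);	/* +1 us */
	if (ret != 0)
		return ret;
	return rte_eth_timesync_read_time(port_id, &ts);
}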
@@ -5156,7 +4795,7 @@ igb_timesync_enable(struct rte_eth_dev *dev)
 
 	/* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
 	E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588),
-			(ETHER_TYPE_1588 |
+			(RTE_ETHER_TYPE_1588 |
 			 E1000_ETQF_FILTER_ENABLE |
 			 E1000_ETQF_1588));
 
@@ -5204,8 +4843,7 @@ igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
 			       uint32_t flags __rte_unused)
 {
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct e1000_adapter *adapter =
-		(struct e1000_adapter *)dev->data->dev_private;
+	struct e1000_adapter *adapter = dev->data->dev_private;
 	uint32_t tsync_rxctl;
 	uint64_t rx_tstamp_cycles;
 	uint64_t ns;
@@ -5226,8 +4864,7 @@ igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
 			       struct timespec *timestamp)
 {
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct e1000_adapter *adapter =
-		(struct e1000_adapter *)dev->data->dev_private;
+	struct e1000_adapter *adapter = dev->data->dev_private;
 	uint32_t tsync_txctl;
 	uint64_t tx_tstamp_cycles;
 	uint64_t ns;
@@ -5442,9 +5079,6 @@ eth_igb_get_module_eeprom(struct rte_eth_dev *dev,
 	u16 first_word, last_word;
 	int i = 0;
 
-	if (info->length == 0)
-		return -EINVAL;
-
 	first_word = info->offset >> 1;
 	last_word = (info->offset + info->length - 1) >> 1;
 
@@ -5471,7 +5105,7 @@ eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
 	uint32_t vec = E1000_MISC_VEC_ID;
 
 	if (rte_intr_allow_others(intr_handle))
@@ -5491,7 +5125,7 @@ eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
 	uint32_t vec = E1000_MISC_VEC_ID;
 
 	if (rte_intr_allow_others(intr_handle))
@@ -5504,7 +5138,7 @@ eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 	E1000_WRITE_REG(hw, E1000_EIMS, regval | mask);
 	E1000_WRITE_FLUSH(hw);
 
-	rte_intr_enable(intr_handle);
+	rte_intr_ack(intr_handle);
 
 	return 0;
 }
@@ -5569,7 +5203,7 @@ eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
 	uint32_t base = E1000_MISC_VEC_ID;
 	uint32_t misc_shift = 0;
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
 
 	/* won't configure msix register if no mapping is done
 	 * between intr vector and event fd
@@ -5610,8 +5244,9 @@ eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
 		E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_MSIX_MODE |
 					E1000_GPIE_PBA | E1000_GPIE_EIAME |
 					E1000_GPIE_NSICR);
-		intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) <<
-			misc_shift;
+		intr_mask =
+			RTE_LEN2MASK(rte_intr_nb_efd_get(intr_handle),
+				     uint32_t) << misc_shift;
 
 		if (dev->data->dev_conf.intr_conf.lsc != 0)
 			intr_mask |= (1 << IGB_MSIX_OTHER_INTR_VEC);
@@ -5629,8 +5264,8 @@ eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
 	/* use EIAM to auto-mask when MSI-X interrupt
 	 * is asserted, this saves a register write for every interrupt
 	 */
-	intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) <<
-		misc_shift;
+	intr_mask = RTE_LEN2MASK(rte_intr_nb_efd_get(intr_handle),
+				 uint32_t) << misc_shift;
 
 	if (dev->data->dev_conf.intr_conf.lsc != 0)
 		intr_mask |= (1 << IGB_MSIX_OTHER_INTR_VEC);
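Note: the hunks around here switch struct rte_intr_handle to an opaque type: pci_dev->intr_handle becomes a pointer, and fields such as nb_efd and intr_vec are reached through accessors instead of direct member access. A hedged sketch of the accessor pattern used in the surrounding hunks; the function name is illustrative:

#include <rte_interrupts.h>

/* Sketch: assign one MSI-X vector per RX queue via the opaque-handle
 * accessors, mirroring the loop in eth_igb_configure_msix_intr(). */
static void
map_queue_vectors(struct rte_intr_handle *ih, uint16_t nb_rxq, int base)
{
	int vec = base;
	uint16_t q;

	for (q = 0; q < nb_rxq; q++) {
		if (rte_intr_vec_list_index_set(ih, q, vec) < 0)
			return;
		if (vec < base + rte_intr_nb_efd_get(ih) - 1)
			vec++;
	}
}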
@@ -5640,8 +5275,8 @@ eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
 	for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
 		queue_id++) {
 		eth_igb_assign_msix_vector(hw, 0, queue_id, vec);
-		intr_handle->intr_vec[queue_id] = vec;
-		if (vec < base + intr_handle->nb_efd - 1)
+		rte_intr_vec_list_index_set(intr_handle, queue_id, vec);
+		if (vec < base + rte_intr_nb_efd_get(intr_handle) - 1)
 			vec++;
 	}
 
@@ -5744,9 +5379,3 @@ RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb, "* igb_uio | uio_pci_generic | vfio-pci");
 RTE_PMD_REGISTER_PCI(net_e1000_igb_vf, rte_igbvf_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb_vf, pci_id_igbvf_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb_vf, "* igb_uio | vfio-pci");
-
-/* see e1000_logs.c */
-RTE_INIT(e1000_init_log)
-{
-	e1000_igb_init_log();
-}
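Note: the removed RTE_INIT constructor registered the driver's log types by hand. In this generation of DPDK that registration is done declaratively at load time; a sketch of the pattern, assuming the RTE_LOG_REGISTER helper and these logtype and level names in e1000_logs.c (all assumptions, not shown by this diff):

#include <rte_log.h>

/* Assumed replacement in e1000_logs.c: declare-and-register the log
 * types at load time instead of an explicit RTE_INIT constructor. */
RTE_LOG_REGISTER(e1000_logtype_init, pmd.net.e1000.init, NOTICE);
RTE_LOG_REGISTER(e1000_logtype_driver, pmd.net.e1000.driver, NOTICE);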