X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fixgbe%2Fixgbe_ethdev.c;h=c28d87e3f63b04387811925e58ac98246abf66cd;hb=92656e96dda5ce01d16fc751149c2763f315e6e5;hp=c226e0a9c44c2272d239ddd8cf2b49d8a37b24dc;hpb=a3bc447b403e0ceb56129dda7687b9cb00c6cbb8;p=dpdk.git

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index c226e0a9c4..c28d87e3f6 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -182,12 +182,21 @@ static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
 				struct rte_eth_xstat *xstats, unsigned n);
 static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev,
 				  struct rte_eth_xstat *xstats, unsigned n);
+static int
+ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
+		uint64_t *values, unsigned int n);
 static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
 static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
-static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
-	struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit);
-static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
-	struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit);
+static int ixgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
+	struct rte_eth_xstat_name *xstats_names,
+	unsigned int size);
+static int ixgbevf_dev_xstats_get_names(struct rte_eth_dev *dev,
+	struct rte_eth_xstat_name *xstats_names, unsigned limit);
+static int ixgbe_dev_xstats_get_names_by_id(
+	struct rte_eth_dev *dev,
+	struct rte_eth_xstat_name *xstats_names,
+	const uint64_t *ids,
+	unsigned int limit);
 static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
 					     uint16_t queue_id,
 					     uint8_t stat_idx,
@@ -239,8 +248,8 @@ static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
 				      struct rte_intr_handle *handle);
 static void ixgbe_dev_interrupt_handler(void *param);
 static void ixgbe_dev_interrupt_delayed_handler(void *param);
-static void ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
-		uint32_t index, uint32_t pool);
+static int ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+			 uint32_t index, uint32_t pool);
 static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
 static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
 					   struct ether_addr *mac_addr);
@@ -296,9 +305,9 @@ static void ixgbe_configure_msix(struct rte_eth_dev *dev);
 
 static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
 		uint16_t queue_idx, uint16_t tx_rate);
-static void ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
-				 struct ether_addr *mac_addr,
-				 uint32_t index, uint32_t pool);
+static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
+				struct ether_addr *mac_addr,
+				uint32_t index, uint32_t pool);
 static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
 static void ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
 					  struct ether_addr *mac_addr);
@@ -523,9 +532,11 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
 	.link_update = ixgbe_dev_link_update,
 	.stats_get = ixgbe_dev_stats_get,
 	.xstats_get = ixgbe_dev_xstats_get,
+	.xstats_get_by_id = ixgbe_dev_xstats_get_by_id,
 	.stats_reset = ixgbe_dev_stats_reset,
 	.xstats_reset = ixgbe_dev_xstats_reset,
 	.xstats_get_names = ixgbe_dev_xstats_get_names,
+	.xstats_get_names_by_id = ixgbe_dev_xstats_get_names_by_id,
 	.queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
 	.fw_version_get = ixgbe_fw_version_get,
 	.dev_infos_get = ixgbe_dev_info_get,
@@ -1120,7 +1131,7 @@ ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
 static int
 eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
 {
-	struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	struct ixgbe_hw *hw =
 		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
@@ -1354,7 +1365,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
 
 static int
 eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
 {
-	struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	struct ixgbe_hw *hw;
@@ -1587,7 +1598,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
 {
 	int diag;
 	uint32_t tc, tcs;
-	struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	struct ixgbe_hw *hw =
 		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
@@ -1736,7 +1747,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
 
 static int
 eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
 {
-	struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	struct ixgbe_hw *hw;
@@ -2165,7 +2176,7 @@ ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
 static int
 ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
 {
-	struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 
 	switch (nb_rx_q) {
 	case 1:
@@ -2414,7 +2425,7 @@ ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
 	uint16_t total_rate = 0;
 	struct rte_pci_device *pci_dev;
 
-	pci_dev = IXGBE_DEV_TO_PCI(dev);
+	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	rte_eth_link_get_nowait(dev->data->port_id, &link);
 
 	if (vf >= pci_dev->max_vfs)
@@ -2485,7 +2496,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct ixgbe_vf_info *vfinfo =
 		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
-	struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	uint32_t intr_vector = 0;
 	int err, link_up = 0, negotiate = 0;
@@ -2703,7 +2714,7 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct ixgbe_vf_info *vfinfo =
 		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
-	struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	int vf;
 
@@ -3127,7 +3138,7 @@ ixgbe_xstats_calc_num(void) {
 }
 
 static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
-	struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit)
+	struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned int size)
 {
 	const unsigned cnt_stats = ixgbe_xstats_calc_num();
 	unsigned stat, i, count;
@@ -3182,6 +3193,84 @@ static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 	return cnt_stats;
 }
 
+static int ixgbe_dev_xstats_get_names_by_id(
+	struct rte_eth_dev *dev,
+	struct rte_eth_xstat_name *xstats_names,
+	const uint64_t *ids,
+	unsigned int limit)
+{
+	if (!ids) {
+		const unsigned int cnt_stats = ixgbe_xstats_calc_num();
+		unsigned int stat, i, count;
+
+		if (xstats_names != NULL) {
+			count = 0;
+
+			/* Note: limit >= cnt_stats checked upstream
+			 * in rte_eth_xstats_names()
+			 */
+
+			/* Extended stats from ixgbe_hw_stats */
+			for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
+				snprintf(xstats_names[count].name,
+					sizeof(xstats_names[count].name),
+					"%s",
+					rte_ixgbe_stats_strings[i].name);
+				count++;
+			}
+
+			/* MACsec Stats */
+			for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
+				snprintf(xstats_names[count].name,
+					sizeof(xstats_names[count].name),
+					"%s",
+					rte_ixgbe_macsec_strings[i].name);
+				count++;
+			}
+
+			/* RX Priority Stats */
+			for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
+				for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
+					snprintf(xstats_names[count].name,
+					    sizeof(xstats_names[count].name),
+					    "rx_priority%u_%s", i,
+					    rte_ixgbe_rxq_strings[stat].name);
+					count++;
+				}
+			}
+
+			/* TX Priority Stats */
+			for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
+				for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
+					snprintf(xstats_names[count].name,
+					    sizeof(xstats_names[count].name),
+					    "tx_priority%u_%s", i,
+					    rte_ixgbe_txq_strings[stat].name);
+					count++;
+				}
+			}
+		}
+		return cnt_stats;
+	}
+
+	uint16_t i;
+	uint16_t size = ixgbe_xstats_calc_num();
+	struct rte_eth_xstat_name xstats_names_copy[size];
+
+	ixgbe_dev_xstats_get_names_by_id(dev, xstats_names_copy, NULL,
+			size);
+
+	for (i = 0; i < limit; i++) {
+		if (ids[i] >= size) {
+			PMD_INIT_LOG(ERR, "id value isn't valid");
+			return -1;
+		}
+		strcpy(xstats_names[i].name,
+				xstats_names_copy[ids[i]].name);
+	}
+	return limit;
+}
+
 static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 	struct rte_eth_xstat_name *xstats_names, unsigned limit)
 {
@@ -3272,6 +3361,97 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 	return count;
 }
 
+static int
+ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
+		uint64_t *values, unsigned int n)
+{
+	if (!ids) {
+		struct ixgbe_hw *hw =
+			IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+		struct ixgbe_hw_stats *hw_stats =
+			IXGBE_DEV_PRIVATE_TO_STATS(
+				dev->data->dev_private);
+		struct ixgbe_macsec_stats *macsec_stats =
+			IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
+				dev->data->dev_private);
+		uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
+		unsigned int i, stat, count = 0;
+
+		count = ixgbe_xstats_calc_num();
+
+		if (!ids && n < count)
+			return count;
+
+		total_missed_rx = 0;
+		total_qbrc = 0;
+		total_qprc = 0;
+		total_qprdc = 0;
+
+		ixgbe_read_stats_registers(hw, hw_stats, macsec_stats,
+				&total_missed_rx, &total_qbrc, &total_qprc,
+				&total_qprdc);
+
+		/* If this is a reset xstats is NULL, and we have cleared the
+		 * registers by reading them.
+		 */
+		if (!ids && !values)
+			return 0;
+
+		/* Extended stats from ixgbe_hw_stats */
+		count = 0;
+		for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
+			values[count] = *(uint64_t *)(((char *)hw_stats) +
+					rte_ixgbe_stats_strings[i].offset);
+			count++;
+		}
+
+		/* MACsec Stats */
+		for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
+			values[count] = *(uint64_t *)(((char *)macsec_stats) +
+					rte_ixgbe_macsec_strings[i].offset);
+			count++;
+		}
+
+		/* RX Priority Stats */
+		for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
+			for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
+				values[count] =
+					*(uint64_t *)(((char *)hw_stats) +
+					rte_ixgbe_rxq_strings[stat].offset +
+					(sizeof(uint64_t) * i));
+				count++;
+			}
+		}
+
+		/* TX Priority Stats */
+		for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
+			for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
+				values[count] =
+					*(uint64_t *)(((char *)hw_stats) +
+					rte_ixgbe_txq_strings[stat].offset +
+					(sizeof(uint64_t) * i));
+				count++;
+			}
+		}
+		return count;
+	}
+
+	uint16_t i;
+	uint16_t size = ixgbe_xstats_calc_num();
+	uint64_t values_copy[size];
+
+	ixgbe_dev_xstats_get_by_id(dev, NULL, values_copy, size);
+
+	for (i = 0; i < n; i++) {
+		if (ids[i] >= size) {
+			PMD_INIT_LOG(ERR, "id value isn't valid");
+			return -1;
+		}
+		values[i] = values_copy[ids[i]];
+	}
+	return n;
+}
+
 static void
 ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
 {
@@ -3402,7 +3582,7 @@ ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
 
 static void
 ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
-	struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
@@ -3544,7 +3724,7 @@ static void
 ixgbevf_dev_info_get(struct rte_eth_dev *dev,
 		     struct rte_eth_dev_info *dev_info)
 {
-	struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	dev_info->pci_dev = pci_dev;
@@ -3603,8 +3783,12 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct rte_eth_link link, old;
 	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+	struct ixgbe_interrupt *intr =
+		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
 	int link_up;
 	int diag;
+	u32 speed = 0;
+	bool autoneg = false;
 
 	link.link_status = ETH_LINK_DOWN;
 	link.link_speed = 0;
@@ -3614,6 +3798,14 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 
 	hw->mac.get_link_status = true;
 
+	if ((intr->flags & IXGBE_FLAG_NEED_LINK_CONFIG) &&
+		ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) {
+		speed = hw->phy.autoneg_advertised;
+		if (!speed)
+			ixgbe_get_link_capabilities(hw, &speed, &autoneg);
+		ixgbe_setup_link(hw, speed, true);
+	}
+
 	/* check if it needs to wait to complete, if lsc interrupt is enabled */
 	if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
 		diag = ixgbe_check_link(hw, &link_speed, &link_up, 0);
@@ -3631,10 +3823,12 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 	}
 
 	if (link_up == 0) {
 		rte_ixgbe_dev_atomic_write_link_status(dev, &link);
+		intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
 		if (link.link_status == old.link_status)
 			return -1;
 		return 0;
 	}
+	intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
 	link.link_status = ETH_LINK_UP;
 	link.link_duplex = ETH_LINK_FULL_DUPLEX;
@@ -3841,7 +4035,7 @@ ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
 static void
 ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
 {
-	struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_eth_link link;
 
 	memset(&link, 0, sizeof(link));
@@ -3913,14 +4107,15 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
 			timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
 
 		ixgbe_dev_link_status_print(dev);
-		intr->mask_original = intr->mask;
-		/* only disable lsc interrupt */
-		intr->mask &= ~IXGBE_EIMS_LSC;
 		if (rte_eal_alarm_set(timeout * 1000,
 				      ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0)
 			PMD_DRV_LOG(ERR, "Error setting alarm");
-		else
-			intr->mask = intr->mask_original;
+		else {
+			/* remember original mask */
+			intr->mask_original = intr->mask;
+			/* only disable lsc interrupt */
+			intr->mask &= ~IXGBE_EIMS_LSC;
+		}
 	}
 
 	PMD_DRV_LOG(DEBUG, "enable intr immediately");
@@ -3948,7 +4143,7 @@ static void
 ixgbe_dev_interrupt_delayed_handler(void *param)
 {
 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
-	struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	struct ixgbe_interrupt *intr =
 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
@@ -4443,14 +4638,15 @@ ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
 	return 0;
 }
 
-static void
+static int
 ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
 				uint32_t index, uint32_t pool)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint32_t enable_addr = 1;
 
-	ixgbe_set_rar(hw, index, mac_addr->addr_bytes, pool, enable_addr);
+	return ixgbe_set_rar(hw, index, mac_addr->addr_bytes,
+			     pool, enable_addr);
 }
 
 static void
@@ -4464,9 +4660,11 @@ ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
 static void
 ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
 {
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
 	ixgbe_remove_rar(dev, 0);
 
-	ixgbe_add_rar(dev, addr, 0, 0);
+	ixgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
 }
 
 static bool
@@ -4492,6 +4690,7 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	struct ixgbe_hw *hw;
 	struct rte_eth_dev_info dev_info;
 	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+	struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
 
 	ixgbe_dev_info_get(dev, &dev_info);
 
@@ -4502,7 +4701,7 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	/* refuse mtu that requires the support of scattered packets when this
 	 * feature has not been enabled before.
 	 */
-	if (!dev->data->scattered_rx &&
+	if (!rx_conf->enable_scatter &&
 	    (frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
 	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
 		return -EINVAL;
@@ -4600,7 +4799,7 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
 	struct ixgbe_hw *hw =
 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint32_t intr_vector = 0;
-	struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 
 	int err, mask = 0;
@@ -4664,7 +4863,7 @@ static void
 ixgbevf_dev_stop(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 
 	PMD_INIT_FUNC_TRACE();
@@ -5137,7 +5336,7 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
 static int
 ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
-	struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	uint32_t mask;
 	struct ixgbe_hw *hw =
@@ -5171,7 +5370,7 @@ ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
 static int
 ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
-	struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	uint32_t mask;
 	struct ixgbe_hw *hw =
@@ -5296,7 +5495,7 @@ ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
 static void
 ixgbevf_configure_msix(struct rte_eth_dev *dev)
 {
-	struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	struct ixgbe_hw *hw =
 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -5330,7 +5529,7 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev)
 static void
 ixgbe_configure_msix(struct rte_eth_dev *dev)
 {
-	struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	struct ixgbe_hw *hw =
 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -5446,7 +5645,7 @@ static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
 	return 0;
 }
 
-static void
+static int
 ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
 		     __attribute__((unused)) uint32_t index,
 		     __attribute__((unused)) uint32_t pool)
@@ -5460,11 +5659,19 @@ ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
 	 * set of PF resources used to store VF MAC addresses.
 	 */
 	if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
-		return;
+		return -1;
 	diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
-	if (diag == 0)
-		return;
-	PMD_DRV_LOG(ERR, "Unable to add MAC address - diag=%d", diag);
+	if (diag != 0)
+		PMD_DRV_LOG(ERR, "Unable to add MAC address "
+			    "%02x:%02x:%02x:%02x:%02x:%02x - diag=%d",
+			    mac_addr->addr_bytes[0],
+			    mac_addr->addr_bytes[1],
+			    mac_addr->addr_bytes[2],
+			    mac_addr->addr_bytes[3],
+			    mac_addr->addr_bytes[4],
+			    mac_addr->addr_bytes[5],
+			    diag);
+	return diag;
 }
 
 static void
@@ -5760,6 +5967,7 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 {
 	struct ixgbe_hw *hw;
 	uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+	struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
 
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -5769,7 +5977,7 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 	/* refuse mtu that requires the support of scattered packets when this
 	 * feature has not been enabled before.
 	 */
-	if (!dev->data->scattered_rx &&
+	if (!rx_conf->enable_scatter &&
 	    (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
 	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
 		return -EINVAL;
@@ -7323,7 +7531,7 @@ ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev,
 			     struct rte_eth_l2_tunnel_conf *l2_tunnel,
 			     bool en)
 {
-	struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	int ret = 0;
 	uint32_t vmtir, vmvir;
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -7658,7 +7866,7 @@ ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE);
+	hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI);
 }
 
 static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
@@ -7940,7 +8148,7 @@ ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
 
 RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
-RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio-pci");
 RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);
-RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio");
+RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci");
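
Usage note (not part of the patch): the xstats_get_by_id and
xstats_get_names_by_id callbacks registered in ixgbe_eth_dev_ops above are
reached through the generic ethdev layer. The sketch below shows how an
application might read a couple of ixgbe extended counters by ID. It assumes
the rte_eth_xstats_get_names_by_id()/rte_eth_xstats_get_by_id() prototypes
from this era of DPDK (the port-id type later widened from uint8_t to
uint16_t), and "port_id" is a hypothetical port that has already been
configured and started.

/* Sketch: read two extended counters by ID through the ethdev API,
 * which dispatches to the PMD callbacks added by this patch.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <rte_ethdev.h>

static void
show_two_xstats(uint16_t port_id)
{
	/* With a NULL name array, the call reports how many xstats exist. */
	int nb = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
	if (nb <= 0)
		return;

	struct rte_eth_xstat_name names[nb];
	/* ids == NULL means "all names", mirroring the PMD code above. */
	rte_eth_xstats_get_names_by_id(port_id, names, nb, NULL);

	/* Fetch only counters 0 and 1 instead of the whole array. */
	uint64_t ids[2] = { 0, 1 };
	uint64_t values[2];
	if (rte_eth_xstats_get_by_id(port_id, ids, values, 2) != 2)
		return;
	printf("%s = %" PRIu64 "\n", names[0].name, values[0]);
	printf("%s = %" PRIu64 "\n", names[1].name, values[1]);
}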