diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c index b3b46312d5..a9203fa68b 100644 --- a/drivers/net/ixgbe/ixgbe_ethdev.c +++ b/drivers/net/ixgbe/ixgbe_ethdev.c @@ -11,6 +11,7 @@ #include <stdarg.h> #include <inttypes.h> #include <netinet/in.h> +#include <rte_string_fns.h> #include <rte_byteorder.h> #include <rte_common.h> #include <rte_cycles.h> @@ -59,9 +60,6 @@ */ #define IXGBE_FC_LO 0x40 -/* Default minimum inter-interrupt interval for EITR configuration */ -#define IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT 0x79E - /* Timer value included in XOFF frames. */ #define IXGBE_FC_PAUSE 0x680 @@ -100,8 +98,6 @@ #define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0])) -#define IXGBE_HKEY_MAX_INDEX 10 - /* Additional timesync values. */ #define NSEC_PER_SEC 1000000000L #define IXGBE_INCVAL_10GB 0x66666666 @@ -117,7 +113,6 @@ #define IXGBE_VT_CTL_POOLING_MODE_MASK 0x00030000 #define IXGBE_VT_CTL_POOLING_MODE_ETAG 0x00010000 -#define DEFAULT_ETAG_ETYPE 0x893f #define IXGBE_ETAG_ETYPE 0x00005084 #define IXGBE_ETAG_ETYPE_MASK 0x0000ffff #define IXGBE_ETAG_ETYPE_VALID 0x80000000 @@ -132,7 +127,7 @@ #define IXGBE_EXVET_VET_EXT_SHIFT 16 #define IXGBE_DMATXCTL_VT_MASK 0xFFFF0000 -static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev); +static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params); static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev); static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev); static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev); @@ -195,6 +190,9 @@ static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on); static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on); +static void ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, + int mask); +static int ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask); static int ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask); static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue); static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue); @@ -220,15 +218,17 @@ static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on); static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev); static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev); static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev); -static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev, - struct rte_intr_handle *handle); +static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev); static void ixgbe_dev_interrupt_handler(void *param); static void ixgbe_dev_interrupt_delayed_handler(void *param); -static int ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr, - uint32_t index, uint32_t pool); +static void ixgbe_dev_setup_link_alarm_handler(void *param); + +static int ixgbe_add_rar(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr, + uint32_t index, uint32_t pool); static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index); -static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, - struct ether_addr *mac_addr); +static int ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr); static void ixgbe_dcb_init(struct ixgbe_hw
*hw, struct ixgbe_dcb_config *dcb_config); static bool is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv); @@ -243,8 +243,8 @@ static int ixgbevf_dev_link_update(struct rte_eth_dev *dev, static void ixgbevf_dev_stop(struct rte_eth_dev *dev); static void ixgbevf_dev_close(struct rte_eth_dev *dev); static int ixgbevf_dev_reset(struct rte_eth_dev *dev); -static void ixgbevf_intr_disable(struct ixgbe_hw *hw); -static void ixgbevf_intr_enable(struct ixgbe_hw *hw); +static void ixgbevf_intr_disable(struct rte_eth_dev *dev); +static void ixgbevf_intr_enable(struct rte_eth_dev *dev); static int ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev); @@ -252,6 +252,7 @@ static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on); static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on); +static int ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask); static int ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask); static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on); static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, @@ -261,12 +262,14 @@ static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, uint8_t queue, uint8_t msix_vector); static void ixgbevf_configure_msix(struct rte_eth_dev *dev); +static void ixgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev); +static void ixgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev); static void ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev); static void ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev); /* For Eth VMDQ APIs support */ static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct - ether_addr * mac_addr, uint8_t on); + rte_ether_addr * mac_addr, uint8_t on); static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on); static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev, struct rte_eth_mirror_conf *mirror_conf, @@ -282,11 +285,11 @@ static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, static void ixgbe_configure_msix(struct rte_eth_dev *dev); static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev, - struct ether_addr *mac_addr, + struct rte_ether_addr *mac_addr, uint32_t index, uint32_t pool); static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index); -static void ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, - struct ether_addr *mac_addr); +static int ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr); static int ixgbe_syn_filter_get(struct rte_eth_dev *dev, struct rte_eth_syn_filter *filter); static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev, @@ -313,7 +316,7 @@ static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev, static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu); static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev, - struct ether_addr *mc_addr_set, + struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr); static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev, struct rte_eth_dcb_info *dcb_info); @@ -327,6 +330,11 @@ static int ixgbe_get_eeprom(struct rte_eth_dev *dev, static int ixgbe_set_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *eeprom); +static int ixgbe_get_module_info(struct rte_eth_dev *dev, + struct rte_eth_dev_module_info *modinfo); +static int 
ixgbe_get_module_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *info); + static int ixgbevf_get_reg_length(struct rte_eth_dev *dev); static int ixgbevf_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs); @@ -434,7 +442,6 @@ static const struct rte_pci_id pci_id_ixgbe_map[] = { { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP) }, { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM) }, { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM) }, - { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS) }, { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T) }, { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1) }, { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP) }, @@ -564,6 +571,8 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = { .get_eeprom_length = ixgbe_get_eeprom_length, .get_eeprom = ixgbe_get_eeprom, .set_eeprom = ixgbe_set_eeprom, + .get_module_info = ixgbe_get_module_info, + .get_module_eeprom = ixgbe_get_module_eeprom, .get_dcb_info = ixgbe_dev_get_dcb_info, .timesync_adjust_time = ixgbe_timesync_adjust_time, .timesync_read_time = ixgbe_timesync_read_time, @@ -591,6 +600,8 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = { .xstats_get_names = ixgbevf_dev_xstats_get_names, .dev_close = ixgbevf_dev_close, .dev_reset = ixgbevf_dev_reset, + .promiscuous_enable = ixgbevf_dev_promiscuous_enable, + .promiscuous_disable = ixgbevf_dev_promiscuous_disable, .allmulticast_enable = ixgbevf_dev_allmulticast_enable, .allmulticast_disable = ixgbevf_dev_allmulticast_disable, .dev_infos_get = ixgbevf_dev_info_get, @@ -1043,7 +1054,7 @@ ixgbe_swfw_lock_reset(struct ixgbe_hw *hw) * It returns 0 on success. */ static int -eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev) +eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) { struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; @@ -1114,6 +1125,14 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev) return -EIO; } + if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) { + PMD_INIT_LOG(ERR, "\nERROR: " + "Firmware recovery mode detected. 
Limiting functionality.\n" + "Refer to the Intel(R) Ethernet Adapters and Devices " + "User Guide for details on firmware recovery mode."); + return -EIO; + } + /* pick up the PCI bus settings for reporting later */ ixgbe_get_bus_info(hw); @@ -1194,26 +1213,26 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev) ixgbe_reset_qstat_mappings(hw); /* Allocate memory for storing MAC addresses */ - eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN * + eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries, 0); if (eth_dev->data->mac_addrs == NULL) { PMD_INIT_LOG(ERR, "Failed to allocate %u bytes needed to store " "MAC addresses", - ETHER_ADDR_LEN * hw->mac.num_rar_entries); + RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries); return -ENOMEM; } /* Copy the permanent MAC address */ - ether_addr_copy((struct ether_addr *) hw->mac.perm_addr, + rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr, &eth_dev->data->mac_addrs[0]); /* Allocate memory for storing hash filter MAC addresses */ - eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN * - IXGBE_VMDQ_NUM_UC_MAC, 0); + eth_dev->data->hash_mac_addrs = rte_zmalloc( + "ixgbe", RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC, 0); if (eth_dev->data->hash_mac_addrs == NULL) { PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to store MAC addresses", - ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC); + RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC); return -ENOMEM; } @@ -1292,7 +1311,7 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev) PMD_INIT_FUNC_TRACE(); if (rte_eal_process_type() != RTE_PROC_PRIMARY) - return -EPERM; + return 0; hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); @@ -1323,14 +1342,14 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev) rte_delay_ms(100); } while (retries++ < (10 + IXGBE_LINK_UP_TIME)); - /* uninitialize PF if max_vfs not zero */ - ixgbe_pf_host_uninit(eth_dev); + /* cancel the delay handler before remove dev */ + rte_eal_alarm_cancel(ixgbe_dev_interrupt_delayed_handler, eth_dev); - rte_free(eth_dev->data->mac_addrs); - eth_dev->data->mac_addrs = NULL; + /* cancel the link handler before remove dev */ + rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, eth_dev); - rte_free(eth_dev->data->hash_mac_addrs); - eth_dev->data->hash_mac_addrs = NULL; + /* uninitialize PF if max_vfs not zero */ + ixgbe_pf_host_uninit(eth_dev); /* remove all the fdir filters & hash */ ixgbe_fdir_filter_uninit(eth_dev); @@ -1483,7 +1502,7 @@ static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev) } l2_tn_info->e_tag_en = FALSE; l2_tn_info->e_tag_fwd_en = FALSE; - l2_tn_info->e_tag_ether_type = DEFAULT_ETAG_ETYPE; + l2_tn_info->e_tag_ether_type = RTE_ETHER_TYPE_ETAG; return 0; } @@ -1501,6 +1520,7 @@ ixgbevf_negotiate_api(struct ixgbe_hw *hw) /* start with highest supported, proceed down */ static const enum ixgbe_pfvf_api_rev sup_ver[] = { + ixgbe_mbox_api_13, ixgbe_mbox_api_12, ixgbe_mbox_api_11, ixgbe_mbox_api_10, @@ -1514,7 +1534,7 @@ ixgbevf_negotiate_api(struct ixgbe_hw *hw) } static void -generate_random_mac_addr(struct ether_addr *mac_addr) +generate_random_mac_addr(struct rte_ether_addr *mac_addr) { uint64_t random; @@ -1523,7 +1543,7 @@ generate_random_mac_addr(struct ether_addr *mac_addr) mac_addr->addr_bytes[1] = 0x09; mac_addr->addr_bytes[2] = 0xC0; /* Force indication of locally assigned MAC address.
*/ - mac_addr->addr_bytes[0] |= ETHER_LOCAL_ADMIN_ADDR; + mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR; /* Generate the last 3 bytes of the MAC address with a random number. */ random = rte_rand(); memcpy(&mac_addr->addr_bytes[3], &random, 3); @@ -1545,7 +1565,8 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private); struct ixgbe_hwstrip *hwstrip = IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private); - struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr; + struct rte_ether_addr *perm_addr = + (struct rte_ether_addr *)hw->mac.perm_addr; PMD_INIT_FUNC_TRACE(); @@ -1602,7 +1623,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) ixgbevf_dev_stats_reset(eth_dev); /* Disable the interrupts for VF */ - ixgbevf_intr_disable(hw); + ixgbevf_intr_disable(eth_dev); hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */ diag = hw->mac.ops.reset_hw(hw); @@ -1614,7 +1635,12 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) */ if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) { PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag); - return diag; + /* + * This error code will be propagated to the app by + * rte_eth_dev_reset, so use a public error code rather than + * the internal-only IXGBE_ERR_RESET_FAILED + */ + return -EAGAIN; } /* negotiate mailbox API version to use with the PF. */ @@ -1624,18 +1650,18 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) ixgbevf_get_queues(hw, &tcs, &tc); /* Allocate memory for storing MAC addresses */ - eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN * + eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries, 0); if (eth_dev->data->mac_addrs == NULL) { PMD_INIT_LOG(ERR, "Failed to allocate %u bytes needed to store " "MAC addresses", - ETHER_ADDR_LEN * hw->mac.num_rar_entries); + RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries); return -ENOMEM; } /* Generate a random MAC address, if none was assigned by PF. 
*/ - if (is_zero_ether_addr(perm_addr)) { + if (rte_is_zero_ether_addr(perm_addr)) { generate_random_mac_addr(perm_addr); diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1); if (diag) { @@ -1655,7 +1681,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) } /* Copy the permanent MAC address */ - ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]); + rte_ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]); /* reset the hardware with the new settings */ diag = hw->mac.ops.start_hw(hw); @@ -1671,7 +1697,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) rte_intr_callback_register(intr_handle, ixgbevf_dev_interrupt_handler, eth_dev); rte_intr_enable(intr_handle); - ixgbevf_intr_enable(hw); + ixgbevf_intr_enable(eth_dev); PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s", eth_dev->data->port_id, pci_dev->id.vendor_id, @@ -1692,7 +1718,7 @@ eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev) PMD_INIT_FUNC_TRACE(); if (rte_eal_process_type() != RTE_PROC_PRIMARY) - return -EPERM; + return 0; hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); @@ -1704,10 +1730,7 @@ eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev) eth_dev->tx_pkt_burst = NULL; /* Disable the interrupts for VF */ - ixgbevf_intr_disable(hw); - - rte_free(eth_dev->data->mac_addrs); - eth_dev->data->mac_addrs = NULL; + ixgbevf_intr_disable(eth_dev); rte_intr_disable(intr_handle); rte_intr_callback_unregister(intr_handle, @@ -1716,16 +1739,81 @@ eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev) return 0; } -static int eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, - struct rte_pci_device *pci_dev) -{ - return rte_eth_dev_pci_generic_probe(pci_dev, - sizeof(struct ixgbe_adapter), eth_ixgbe_dev_init); +static int +eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + char name[RTE_ETH_NAME_MAX_LEN]; + struct rte_eth_dev *pf_ethdev; + struct rte_eth_devargs eth_da; + int i, retval; + + if (pci_dev->device.devargs) { + retval = rte_eth_devargs_parse(pci_dev->device.devargs->args, + &eth_da); + if (retval) + return retval; + } else + memset(&eth_da, 0, sizeof(eth_da)); + + retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name, + sizeof(struct ixgbe_adapter), + eth_dev_pci_specific_init, pci_dev, + eth_ixgbe_dev_init, NULL); + + if (retval || eth_da.nb_representor_ports < 1) + return retval; + + pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name); + if (pf_ethdev == NULL) + return -ENODEV; + + /* probe VF representor ports */ + for (i = 0; i < eth_da.nb_representor_ports; i++) { + struct ixgbe_vf_info *vfinfo; + struct ixgbe_vf_representor representor; + + vfinfo = *IXGBE_DEV_PRIVATE_TO_P_VFDATA( + pf_ethdev->data->dev_private); + if (vfinfo == NULL) { + PMD_DRV_LOG(ERR, + "no virtual functions supported by PF"); + break; + } + + representor.vf_id = eth_da.representor_ports[i]; + representor.switch_domain_id = vfinfo->switch_domain_id; + representor.pf_ethdev = pf_ethdev; + + /* representor port net_bdf_port */ + snprintf(name, sizeof(name), "net_%s_representor_%d", + pci_dev->device.name, + eth_da.representor_ports[i]); + + retval = rte_eth_dev_create(&pci_dev->device, name, + sizeof(struct ixgbe_vf_representor), NULL, NULL, + ixgbe_vf_representor_init, &representor); + + if (retval) + PMD_DRV_LOG(ERR, "failed to create ixgbe vf " + "representor %s.", name); + } + + return 0; } static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev) { - return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbe_dev_uninit); +
struct rte_eth_dev *ethdev; + + ethdev = rte_eth_dev_allocated(pci_dev->device.name); + if (!ethdev) + return -ENODEV; + + if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR) + return rte_eth_dev_destroy(ethdev, ixgbe_vf_representor_uninit); + else + return rte_eth_dev_destroy(ethdev, eth_ixgbe_dev_uninit); } static struct rte_pci_driver rte_ixgbe_pmd = { @@ -1908,10 +1996,13 @@ ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on) rxq = dev->data->rx_queues[queue]; - if (on) + if (on) { rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; - else + rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; + } else { rxq->vlan_flags = PKT_RX_VLAN; + rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; + } } static void @@ -2063,22 +2154,47 @@ ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev) } } +static void +ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask) +{ + uint16_t i; + struct rte_eth_rxmode *rxmode; + struct ixgbe_rx_queue *rxq; + + if (mask & ETH_VLAN_STRIP_MASK) { + rxmode = &dev->data->dev_conf.rxmode; + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; + } + else + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; + } + } +} + static int -ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask) +ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask) { + struct rte_eth_rxmode *rxmode; + rxmode = &dev->data->dev_conf.rxmode; + if (mask & ETH_VLAN_STRIP_MASK) { ixgbe_vlan_hw_strip_config(dev); } if (mask & ETH_VLAN_FILTER_MASK) { - if (dev->data->dev_conf.rxmode.hw_vlan_filter) + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) ixgbe_vlan_hw_filter_enable(dev); else ixgbe_vlan_hw_filter_disable(dev); } if (mask & ETH_VLAN_EXTEND_MASK) { - if (dev->data->dev_conf.rxmode.hw_vlan_extend) + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) ixgbe_vlan_hw_extend_enable(dev); else ixgbe_vlan_hw_extend_disable(dev); @@ -2087,6 +2203,16 @@ ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask) return 0; } +static int +ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask) +{ + ixgbe_config_vlan_strip_on_all_queues(dev, mask); + + ixgbe_vlan_offload_config(dev, mask); + + return 0; +} + static void ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev) { @@ -2235,11 +2361,6 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev) if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) { const struct rte_eth_dcb_rx_conf *conf; - if (nb_rx_q != IXGBE_DCB_NB_QUEUES) { - PMD_INIT_LOG(ERR, "DCB selected, nb_rx_q != %d.", - IXGBE_DCB_NB_QUEUES); - return -EINVAL; - } conf = &dev_conf->rx_adv_conf.dcb_rx_conf; if (!(conf->nb_tcs == ETH_4_TCS || conf->nb_tcs == ETH_8_TCS)) { @@ -2253,11 +2374,6 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev) if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) { const struct rte_eth_dcb_tx_conf *conf; - if (nb_tx_q != IXGBE_DCB_NB_QUEUES) { - PMD_INIT_LOG(ERR, "DCB, nb_tx_q != %d.", - IXGBE_DCB_NB_QUEUES); - return -EINVAL; - } conf = &dev_conf->tx_adv_conf.dcb_tx_conf; if (!(conf->nb_tcs == ETH_4_TCS || conf->nb_tcs == ETH_8_TCS)) { @@ -2426,6 +2542,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev) uint32_t intr_vector = 0; int err, link_up = 0, negotiate = 0; uint32_t speed = 0; + uint32_t allowed_speeds = 0; int mask = 0; int status; uint16_t vf, idx; @@ -2446,6 +2563,9 @@ ixgbe_dev_start(struct rte_eth_dev *dev) return -EINVAL; } + /* 
Stop the link setup handler before resetting the HW. */ + rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev); + /* disable uio/vfio intr/eventfd mapping */ rte_intr_disable(intr_handle); @@ -2507,7 +2627,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev) mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | ETH_VLAN_EXTEND_MASK; - err = ixgbe_vlan_offload_set(dev, mask); + err = ixgbe_vlan_offload_config(dev, mask); if (err) { PMD_INIT_LOG(ERR, "Unable to set VLAN offload"); goto error; @@ -2546,10 +2666,16 @@ ixgbe_dev_start(struct rte_eth_dev *dev) goto error; } - /* Skip link setup if loopback mode is enabled for 82599. */ - if (hw->mac.type == ixgbe_mac_82599EB && - dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX) - goto skip_link_setup; + /* Skip link setup if loopback mode is enabled. */ + if (dev->data->dev_conf.lpbk_mode != 0) { + err = ixgbe_check_supported_loopback_mode(dev); + if (err < 0) { + PMD_INIT_LOG(ERR, "Unsupported loopback mode"); + goto error; + } else { + goto skip_link_setup; + } + } if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) { err = hw->mac.ops.setup_sfp(hw); @@ -2574,9 +2700,25 @@ ixgbe_dev_start(struct rte_eth_dev *dev) if (err) goto error; + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G | + ETH_LINK_SPEED_2_5G | ETH_LINK_SPEED_5G | + ETH_LINK_SPEED_10G; + if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || + hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) + allowed_speeds = ETH_LINK_SPEED_10M | + ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G; + break; + default: + allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G | + ETH_LINK_SPEED_10G; + } + link_speeds = &dev->data->dev_conf.link_speeds; - if (*link_speeds & ~(ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G | - ETH_LINK_SPEED_10G)) { + if (*link_speeds & ~allowed_speeds) { PMD_INIT_LOG(ERR, "Invalid link setting"); goto error; } @@ -2602,10 +2744,16 @@ ixgbe_dev_start(struct rte_eth_dev *dev) } else { if (*link_speeds & ETH_LINK_SPEED_10G) speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (*link_speeds & ETH_LINK_SPEED_5G) + speed |= IXGBE_LINK_SPEED_5GB_FULL; + if (*link_speeds & ETH_LINK_SPEED_2_5G) + speed |= IXGBE_LINK_SPEED_2_5GB_FULL; if (*link_speeds & ETH_LINK_SPEED_1G) speed |= IXGBE_LINK_SPEED_1GB_FULL; if (*link_speeds & ETH_LINK_SPEED_100M) speed |= IXGBE_LINK_SPEED_100_FULL; + if (*link_speeds & ETH_LINK_SPEED_10M) + speed |= IXGBE_LINK_SPEED_10_FULL; } err = ixgbe_setup_link(hw, speed, link_up); @@ -2647,6 +2795,12 @@ skip_link_setup: "please call hierarchy_commit() " "before starting the port"); + /* + * Update link status right before return, because it may + * start link configuration process in a separate thread. 
+ */ + ixgbe_dev_link_update(dev, 0); + return 0; error: @@ -2662,6 +2816,8 @@ static void ixgbe_dev_stop(struct rte_eth_dev *dev) { struct rte_eth_link link; + struct ixgbe_adapter *adapter = + (struct ixgbe_adapter *)dev->data->dev_private; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ixgbe_vf_info *vfinfo = @@ -2674,6 +2830,8 @@ ixgbe_dev_stop(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); + rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev); + /* disable interrupts */ ixgbe_disable_intr(hw); @@ -2720,6 +2878,8 @@ ixgbe_dev_stop(struct rte_eth_dev *dev) /* reset hierarchy commit */ tm_conf->committed = false; + + adapter->rss_reta_updated = 0; } /* @@ -2827,7 +2987,7 @@ ixgbe_dev_reset(struct rte_eth_dev *dev) if (ret) return ret; - ret = eth_ixgbe_dev_init(dev); + ret = eth_ixgbe_dev_init(dev, NULL); return ret; } @@ -2895,7 +3055,7 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw, hw_stats->qbrc[i] += ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32); if (crc_strip == 0) - hw_stats->qbrc[i] -= delta_qprc * ETHER_CRC_LEN; + hw_stats->qbrc[i] -= delta_qprc * RTE_ETHER_CRC_LEN; hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); hw_stats->qbtc[i] += @@ -2940,12 +3100,12 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw, hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT); if (crc_strip == 0) - hw_stats->gorc -= delta_gprc * ETHER_CRC_LEN; + hw_stats->gorc -= delta_gprc * RTE_ETHER_CRC_LEN; uint64_t delta_gptc = IXGBE_READ_REG(hw, IXGBE_GPTC); hw_stats->gptc += delta_gptc; - hw_stats->gotc -= delta_gptc * ETHER_CRC_LEN; - hw_stats->tor -= (hw_stats->tpr - old_tpr) * ETHER_CRC_LEN; + hw_stats->gotc -= delta_gptc * RTE_ETHER_CRC_LEN; + hw_stats->tor -= (hw_stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN; /* * Workaround: mprc hardware is incorrectly counting @@ -2975,7 +3135,7 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw, hw_stats->gptc -= total; hw_stats->mptc -= total; hw_stats->ptc64 -= total; - hw_stats->gotc -= total * ETHER_MIN_LEN; + hw_stats->gotc -= total * RTE_ETHER_MIN_LEN; hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC); @@ -3003,9 +3163,18 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw, } /* Flow Director Stats registers */ - hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); - hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); - + if (hw->mac.type != ixgbe_mac_82598EB) { + hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); + hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); + hw_stats->fdirustat_add += IXGBE_READ_REG(hw, + IXGBE_FDIRUSTAT) & 0xFFFF; + hw_stats->fdirustat_remove += (IXGBE_READ_REG(hw, + IXGBE_FDIRUSTAT) >> 16) & 0xFFFF; + hw_stats->fdirfstat_fadd += IXGBE_READ_REG(hw, + IXGBE_FDIRFSTAT) & 0xFFFF; + hw_stats->fdirfstat_fremove += (IXGBE_READ_REG(hw, + IXGBE_FDIRFSTAT) >> 16) & 0xFFFF; + } /* MACsec Stats registers */ macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT); macsec_stats->out_pkts_encrypted += @@ -3136,19 +3305,17 @@ static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, /* Extended stats from ixgbe_hw_stats */ for (i = 0; i < IXGBE_NB_HW_STATS; i++) { - snprintf(xstats_names[count].name, - sizeof(xstats_names[count].name), - "%s", - rte_ixgbe_stats_strings[i].name); + strlcpy(xstats_names[count].name, + rte_ixgbe_stats_strings[i].name, + sizeof(xstats_names[count].name)); count++; } /* MACsec Stats */ for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { - 
snprintf(xstats_names[count].name, - sizeof(xstats_names[count].name), - "%s", - rte_ixgbe_macsec_strings[i].name); + strlcpy(xstats_names[count].name, + rte_ixgbe_macsec_strings[i].name, + sizeof(xstats_names[count].name)); count++; } @@ -3196,19 +3363,17 @@ static int ixgbe_dev_xstats_get_names_by_id( /* Extended stats from ixgbe_hw_stats */ for (i = 0; i < IXGBE_NB_HW_STATS; i++) { - snprintf(xstats_names[count].name, - sizeof(xstats_names[count].name), - "%s", - rte_ixgbe_stats_strings[i].name); + strlcpy(xstats_names[count].name, + rte_ixgbe_stats_strings[i].name, + sizeof(xstats_names[count].name)); count++; } /* MACsec Stats */ for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { - snprintf(xstats_names[count].name, - sizeof(xstats_names[count].name), - "%s", - rte_ixgbe_macsec_strings[i].name); + strlcpy(xstats_names[count].name, + rte_ixgbe_macsec_strings[i].name, + sizeof(xstats_names[count].name)); count++; } @@ -3265,9 +3430,9 @@ static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, if (xstats_names != NULL) for (i = 0; i < IXGBEVF_NB_XSTATS; i++) - snprintf(xstats_names[i].name, - sizeof(xstats_names[i].name), - "%s", rte_ixgbevf_stats_strings[i].name); + strlcpy(xstats_names[i].name, + rte_ixgbevf_stats_strings[i].name, + sizeof(xstats_names[i].name)); return IXGBEVF_NB_XSTATS; } @@ -3571,7 +3736,6 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct rte_eth_conf *dev_conf = &dev->data->dev_conf; - dev_info->pci_dev = pci_dev; dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; if (RTE_ETH_DEV_SRIOV(dev).active == 0) { @@ -3592,55 +3756,14 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->max_vmdq_pools = ETH_16_POOLS; else dev_info->max_vmdq_pools = ETH_64_POOLS; + dev_info->max_mtu = dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD; + dev_info->min_mtu = RTE_ETHER_MIN_MTU; dev_info->vmdq_queue_num = dev_info->max_rx_queues; - dev_info->rx_offload_capa = - DEV_RX_OFFLOAD_VLAN_STRIP | - DEV_RX_OFFLOAD_IPV4_CKSUM | - DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM | - DEV_RX_OFFLOAD_CRC_STRIP; - - /* - * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV - * mode. 
- */ - if ((hw->mac.type == ixgbe_mac_82599EB || - hw->mac.type == ixgbe_mac_X540) && - !RTE_ETH_DEV_SRIOV(dev).active) - dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO; - - if (hw->mac.type == ixgbe_mac_82599EB || - hw->mac.type == ixgbe_mac_X540) - dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_MACSEC_STRIP; - - if (hw->mac.type == ixgbe_mac_X550 || - hw->mac.type == ixgbe_mac_X550EM_x || - hw->mac.type == ixgbe_mac_X550EM_a) - dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM; - - dev_info->tx_offload_capa = - DEV_TX_OFFLOAD_VLAN_INSERT | - DEV_TX_OFFLOAD_IPV4_CKSUM | - DEV_TX_OFFLOAD_UDP_CKSUM | - DEV_TX_OFFLOAD_TCP_CKSUM | - DEV_TX_OFFLOAD_SCTP_CKSUM | - DEV_TX_OFFLOAD_TCP_TSO; - - if (hw->mac.type == ixgbe_mac_82599EB || - hw->mac.type == ixgbe_mac_X540) - dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT; - - if (hw->mac.type == ixgbe_mac_X550 || - hw->mac.type == ixgbe_mac_X550EM_x || - hw->mac.type == ixgbe_mac_X550EM_a) - dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; - -#ifdef RTE_LIBRTE_SECURITY - if (dev->security_ctx) { - dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY; - dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY; - } -#endif + dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev); + dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) | + dev_info->rx_queue_offload_capa); + dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev); + dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev); dev_info->default_rxconf = (struct rte_eth_rxconf) { .rx_thresh = { @@ -3650,6 +3773,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) }, .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH, .rx_drop_en = 0, + .offloads = 0, }; dev_info->default_txconf = (struct rte_eth_txconf) { @@ -3660,8 +3784,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) }, .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH, .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH, - .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | - ETH_TXQ_FLAGS_NOOFFLOADS, + .offloads = 0, }; dev_info->rx_desc_lim = rx_desc_lim; @@ -3682,6 +3805,14 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->speed_capa |= ETH_LINK_SPEED_2_5G; dev_info->speed_capa |= ETH_LINK_SPEED_5G; } + + /* Driver-preferred Rx/Tx parameters */ + dev_info->default_rxportconf.burst_size = 32; + dev_info->default_txportconf.burst_size = 32; + dev_info->default_rxportconf.nb_queues = 1; + dev_info->default_txportconf.nb_queues = 1; + dev_info->default_rxportconf.ring_size = 256; + dev_info->default_txportconf.ring_size = 256; } static const uint32_t * @@ -3730,11 +3861,11 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev, struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - dev_info->pci_dev = pci_dev; dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */ dev_info->max_rx_pktlen = 9728; /* includes CRC, cf MAXFRS reg */ + dev_info->max_mtu = dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD; dev_info->max_mac_addrs = hw->mac.num_rar_entries; dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC; dev_info->max_vfs = pci_dev->max_vfs; @@ -3742,17 +3873,11 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev, dev_info->max_vmdq_pools = ETH_16_POOLS; else dev_info->max_vmdq_pools = ETH_64_POOLS; - 
dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | - DEV_RX_OFFLOAD_IPV4_CKSUM | - DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM | - DEV_RX_OFFLOAD_CRC_STRIP; - dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | - DEV_TX_OFFLOAD_IPV4_CKSUM | - DEV_TX_OFFLOAD_UDP_CKSUM | - DEV_TX_OFFLOAD_TCP_CKSUM | - DEV_TX_OFFLOAD_SCTP_CKSUM | - DEV_TX_OFFLOAD_TCP_TSO; + dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev); + dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) | + dev_info->rx_queue_offload_capa); + dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev); + dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev); dev_info->default_rxconf = (struct rte_eth_rxconf) { .rx_thresh = { @@ -3762,6 +3887,7 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev, }, .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH, .rx_drop_en = 0, + .offloads = 0, }; dev_info->default_txconf = (struct rte_eth_txconf) { @@ -3772,8 +3898,7 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev, }, .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH, .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH, - .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | - ETH_TXQ_FLAGS_NOOFFLOADS, + .offloads = 0, }; dev_info->rx_desc_lim = rx_desc_lim; @@ -3784,11 +3909,6 @@ static int ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, int *link_up, int wait_to_complete) { - /** - * for a quick link status checking, wait_to_compelet == 0, - * skip PF link status checking - */ - bool no_pflink_check = wait_to_complete == 0; struct ixgbe_mbx_info *mbx = &hw->mbx; struct ixgbe_mac_info *mac = &hw->mac; uint32_t links_reg, in_msg; @@ -3809,7 +3929,7 @@ ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs * before the link status is correct */ - if (mac->type == ixgbe_mac_82599_vf) { + if (mac->type == ixgbe_mac_82599_vf && wait_to_complete) { int i; for (i = 0; i < 5; i++) { @@ -3849,14 +3969,6 @@ ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, *speed = IXGBE_LINK_SPEED_UNKNOWN; } - if (no_pflink_check) { - if (*speed == IXGBE_LINK_SPEED_UNKNOWN) - mac->get_link_status = true; - else - mac->get_link_status = false; - - goto out; - } /* if the read failed it could just be a mailbox collision, best wait * until we are called again and don't report an error */ @@ -3866,7 +3978,7 @@ ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) { /* msg is not CTS and is NACK we must have lost CTS status */ if (in_msg & IXGBE_VT_MSGTYPE_NACK) - ret_val = -1; + mac->get_link_status = false; goto out; } @@ -3886,8 +3998,27 @@ out: return ret_val; } +static void +ixgbe_dev_setup_link_alarm_handler(void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + u32 speed; + bool autoneg = false; + + speed = hw->phy.autoneg_advertised; + if (!speed) + ixgbe_get_link_capabilities(hw, &speed, &autoneg); + + ixgbe_setup_link(hw, speed, true); + + intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; +} + /* return 0 means link status changed, -1 means not changed */ -static int +int ixgbe_dev_link_update_share(struct rte_eth_dev *dev, int wait_to_complete, int vf) { @@ -3898,25 +4029,18 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev, IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); int link_up; int diag; - u32 
speed = 0; int wait = 1; - bool autoneg = false; memset(&link, 0, sizeof(link)); link.link_status = ETH_LINK_DOWN; - link.link_speed = 0; + link.link_speed = ETH_SPEED_NUM_NONE; link.link_duplex = ETH_LINK_HALF_DUPLEX; link.link_autoneg = ETH_LINK_AUTONEG; hw->mac.get_link_status = true; - if ((intr->flags & IXGBE_FLAG_NEED_LINK_CONFIG) && - ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) { - speed = hw->phy.autoneg_advertised; - if (!speed) - ixgbe_get_link_capabilities(hw, &speed, &autoneg); - ixgbe_setup_link(hw, speed, true); - } + if (intr->flags & IXGBE_FLAG_NEED_LINK_CONFIG) + return rte_eth_linkstatus_set(dev, &link); /* check if it needs to wait to complete, if lsc interrupt is enabled */ if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0) @@ -3934,19 +4058,26 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev, } if (link_up == 0) { - intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; + if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) { + intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; + rte_eal_alarm_set(10, + ixgbe_dev_setup_link_alarm_handler, dev); + } return rte_eth_linkstatus_set(dev, &link); } - intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; link.link_status = ETH_LINK_UP; link.link_duplex = ETH_LINK_FULL_DUPLEX; switch (link_speed) { default: case IXGBE_LINK_SPEED_UNKNOWN: + if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || + hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) + link.link_speed = ETH_SPEED_NUM_10M; + else + link.link_speed = ETH_SPEED_NUM_100M; link.link_duplex = ETH_LINK_FULL_DUPLEX; - link.link_speed = ETH_SPEED_NUM_100M; break; case IXGBE_LINK_SPEED_100_FULL: @@ -4199,8 +4330,7 @@ ixgbe_dev_link_status_print(struct rte_eth_dev *dev) * - On failure, a negative value. */ static int -ixgbe_dev_interrupt_action(struct rte_eth_dev *dev, - struct rte_intr_handle *intr_handle) +ixgbe_dev_interrupt_action(struct rte_eth_dev *dev) { struct ixgbe_interrupt *intr = IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); @@ -4251,7 +4381,6 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev, PMD_DRV_LOG(DEBUG, "enable intr immediately"); ixgbe_enable_intr(dev); - rte_intr_enable(intr_handle); return 0; } @@ -4334,7 +4463,7 @@ ixgbe_dev_interrupt_handler(void *param) struct rte_eth_dev *dev = (struct rte_eth_dev *)param; ixgbe_dev_interrupt_get_status(dev); - ixgbe_dev_interrupt_action(dev, dev->intr_handle); + ixgbe_dev_interrupt_action(dev); } static int @@ -4429,7 +4558,8 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) * At least reserve one Ethernet frame for watermark * high_water/low_water in kilo bytes for ixgbe */ - max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT; + max_high_water = (rx_buf_size - + RTE_ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT; if ((fc_conf->high_water > max_high_water) || (fc_conf->high_water < fc_conf->low_water)) { PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB"); @@ -4650,7 +4780,8 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p * At least reserve one Ethernet frame for watermark * high_water/low_water in kilo bytes for ixgbe */ - max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT; + max_high_water = (rx_buf_size - + RTE_ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT; if ((pfc_conf->fc.high_water > max_high_water) || (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) { PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB"); @@ -4683,6 +4814,8 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev, uint8_t 
j, mask; uint32_t reta, r; uint16_t idx, shift; + struct ixgbe_adapter *adapter = + (struct ixgbe_adapter *)dev->data->dev_private; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint32_t reta_reg; @@ -4724,6 +4857,7 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev, } IXGBE_WRITE_REG(hw, reta_reg, reta); } + adapter->rss_reta_updated = 1; return 0; } @@ -4771,7 +4905,7 @@ ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev, } static int -ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr, +ixgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, uint32_t index, uint32_t pool) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -4789,14 +4923,15 @@ ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index) ixgbe_clear_rar(hw, index); } -static void -ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr) +static int +ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr) { struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); ixgbe_remove_rar(dev, 0); - ixgbe_add_rar(dev, addr, 0, pci_dev->max_vfs); + + return 0; } static bool @@ -4821,13 +4956,13 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) uint32_t maxfrs; struct ixgbe_hw *hw; struct rte_eth_dev_info dev_info; - uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; + uint32_t frame_size = mtu + IXGBE_ETH_OVERHEAD; struct rte_eth_dev_data *dev_data = dev->data; ixgbe_dev_info_get(dev, &dev_info); /* check that mtu is within the allowed range */ - if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) + if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen) return -EINVAL; /* If device is started, refuse mtu that requires the support of @@ -4844,11 +4979,13 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); /* switch to jumbo mode if needed */ - if (frame_size > ETHER_MAX_LEN) { - dev->data->dev_conf.rxmode.jumbo_frame = 1; + if (frame_size > RTE_ETHER_MAX_LEN) { + dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; hlreg0 |= IXGBE_HLREG0_JUMBOEN; } else { - dev->data->dev_conf.rxmode.jumbo_frame = 0; + dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; hlreg0 &= ~IXGBE_HLREG0_JUMBOEN; } IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); @@ -4868,19 +5005,32 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) * Virtual Function operations */ static void -ixgbevf_intr_disable(struct ixgbe_hw *hw) +ixgbevf_intr_disable(struct rte_eth_dev *dev) { + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + PMD_INIT_FUNC_TRACE(); /* Clear interrupt mask to stop from interrupts being generated */ IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK); IXGBE_WRITE_FLUSH(hw); + + /* Clear mask value. */ + intr->mask = 0; } static void -ixgbevf_intr_enable(struct ixgbe_hw *hw) +ixgbevf_intr_enable(struct rte_eth_dev *dev) { + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + PMD_INIT_FUNC_TRACE(); /* VF enable interrupt autoclean */ @@ -4889,6 +5039,9 @@ ixgbevf_intr_enable(struct ixgbe_hw *hw) IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK); IXGBE_WRITE_FLUSH(hw); + + /* Save IXGBE_VTEIMS value to mask. 
*/ + intr->mask = IXGBE_VF_IRQ_ENABLE_MASK; } static int @@ -4906,14 +5059,14 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev) * Keep the persistent behavior the same as Host PF */ #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC - if (!conf->rxmode.hw_strip_crc) { + if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) { PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip"); - conf->rxmode.hw_strip_crc = 1; + conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC; } #else - if (conf->rxmode.hw_strip_crc) { + if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) { PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip"); - conf->rxmode.hw_strip_crc = 0; + conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC; } #endif @@ -4940,6 +5093,9 @@ ixgbevf_dev_start(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); + /* Stop the link setup handler before resetting the HW. */ + rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev); + err = hw->mac.ops.reset_hw(hw); if (err) { PMD_INIT_LOG(ERR, "Unable to reset vf hardware (%d)", err); @@ -4966,7 +5122,7 @@ ixgbevf_dev_start(struct rte_eth_dev *dev) /* Set HW strip */ mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | ETH_VLAN_EXTEND_MASK; - err = ixgbevf_vlan_offload_set(dev, mask); + err = ixgbevf_vlan_offload_config(dev, mask); if (err) { PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err); ixgbe_dev_clear_queues(dev); @@ -5010,7 +5166,13 @@ ixgbevf_dev_start(struct rte_eth_dev *dev) rte_intr_enable(intr_handle); /* Re-enable interrupt for VF */ - ixgbevf_intr_enable(hw); + ixgbevf_intr_enable(dev); + + /* + * Update link status right before return, because it may + * start link configuration process in a separate thread. + */ + ixgbevf_dev_link_update(dev, 0); return 0; } @@ -5019,12 +5181,16 @@ static void ixgbevf_dev_stop(struct rte_eth_dev *dev) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_adapter *adapter = + (struct ixgbe_adapter *)dev->data->dev_private; struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; PMD_INIT_FUNC_TRACE(); - ixgbevf_intr_disable(hw); + rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev); + + ixgbevf_intr_disable(dev); hw->adapter_stopped = 1; ixgbe_stop_adapter(hw); @@ -5046,6 +5212,8 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev) rte_free(intr_handle->intr_vec); intr_handle->intr_vec = NULL; } + + adapter->rss_reta_updated = 0; } static void @@ -5162,24 +5330,34 @@ ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) } static int -ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask) +ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask) { - struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_rx_queue *rxq; uint16_t i; int on = 0; /* VF function only support hw strip feature, others are not support */ if (mask & ETH_VLAN_STRIP_MASK) { - on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip); - - for (i = 0; i < hw->mac.max_rx_queues; i++) + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + on = !!(rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP); ixgbevf_vlan_strip_queue_set(dev, i, on); + } } return 0; } +static int +ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask) +{ + ixgbe_config_vlan_strip_on_all_queues(dev, mask); + + ixgbevf_vlan_offload_config(dev, mask); + + return 0; +} + int ixgbe_vt_check(struct ixgbe_hw *hw) { @@ -5196,7 +5374,7 @@ ixgbe_vt_check(struct ixgbe_hw *hw) } static uint32_t 
-ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr *uc_addr) +ixgbe_uta_vector(struct ixgbe_hw *hw, struct rte_ether_addr *uc_addr) { uint32_t vector = 0; @@ -5227,8 +5405,8 @@ ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr *uc_addr) } static int -ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr, - uint8_t on) +ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr, uint8_t on) { uint32_t vector; uint32_t uta_idx; @@ -5516,17 +5694,17 @@ ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) { struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; - uint32_t mask; + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint32_t vec = IXGBE_MISC_VEC_ID; - mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS); if (rte_intr_allow_others(intr_handle)) vec = IXGBE_RX_VEC_START; - mask |= (1 << vec); + intr->mask |= (1 << vec); RTE_SET_USED(queue_id); - IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); + IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask); rte_intr_enable(intr_handle); @@ -5536,19 +5714,19 @@ ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) { - uint32_t mask; + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; uint32_t vec = IXGBE_MISC_VEC_ID; - mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS); if (rte_intr_allow_others(intr_handle)) vec = IXGBE_RX_VEC_START; - mask &= ~(1 << vec); + intr->mask &= ~(1 << vec); RTE_SET_USED(queue_id); - IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); + IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask); return 0; } @@ -5714,6 +5892,13 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev) if (vector_idx < base + intr_handle->nb_efd - 1) vector_idx++; } + + /* As RX queue setting above show, all queues use the vector 0. + * Set only the ITR value of IXGBE_MISC_VEC_ID. + */ + IXGBE_WRITE_REG(hw, IXGBE_VTEITR(IXGBE_MISC_VEC_ID), + IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT) + | IXGBE_EITR_CNT_WDIS); } /** @@ -5735,8 +5920,12 @@ ixgbe_configure_msix(struct rte_eth_dev *dev) /* won't configure msix register if no mapping is done * between intr vector and event fd + * but if misx has been enabled already, need to configure + * auto clean, auto mask and throttling. */ - if (!rte_intr_dp_is_en(intr_handle)) + gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); + if (!rte_intr_dp_is_en(intr_handle) && + !(gpie & (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT))) return; if (rte_intr_allow_others(intr_handle)) @@ -5760,30 +5949,34 @@ ixgbe_configure_msix(struct rte_eth_dev *dev) /* Populate the IVAR table and set the ITR values to the * corresponding register. 
*/ - for (queue_id = 0; queue_id < dev->data->nb_rx_queues; - queue_id++) { - /* by default, 1:1 mapping */ - ixgbe_set_ivar_map(hw, 0, queue_id, vec); - intr_handle->intr_vec[queue_id] = vec; - if (vec < base + intr_handle->nb_efd - 1) - vec++; - } + if (rte_intr_dp_is_en(intr_handle)) { + for (queue_id = 0; queue_id < dev->data->nb_rx_queues; + queue_id++) { + /* by default, 1:1 mapping */ + ixgbe_set_ivar_map(hw, 0, queue_id, vec); + intr_handle->intr_vec[queue_id] = vec; + if (vec < base + intr_handle->nb_efd - 1) + vec++; + } - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - ixgbe_set_ivar_map(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX, - IXGBE_MISC_VEC_ID); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID); - break; - default: - break; + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ixgbe_set_ivar_map(hw, -1, + IXGBE_IVAR_OTHER_CAUSES_INDEX, + IXGBE_MISC_VEC_ID); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID); + break; + default: + break; + } } IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID), - IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT & 0xFFF); + IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT) + | IXGBE_EITR_CNT_WDIS); /* set up to autoclear timer, and the vectors */ mask = IXGBE_EIMS_ENABLE_MASK; @@ -5799,6 +5992,7 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx, uint16_t tx_rate) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_rxmode *rxmode; uint32_t rf_dec, rf_int; uint32_t bcnrc_val; uint16_t link_speed = dev->data->dev_link.link_speed; @@ -5820,14 +6014,14 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, bcnrc_val = 0; } + rxmode = &dev->data->dev_conf.rxmode; /* * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise * set as 0x4. */ - if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) && - (dev->data->dev_conf.rxmode.max_rx_pkt_len >= - IXGBE_MAX_JUMBO_FRAME_SIZE)) + if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) && + (rxmode->max_rx_pkt_len >= IXGBE_MAX_JUMBO_FRAME_SIZE)) IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, IXGBE_MMW_SIZE_JUMBO_FRAME); else @@ -5843,7 +6037,7 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, } static int -ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr, +ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, __attribute__((unused)) uint32_t index, __attribute__((unused)) uint32_t pool) { @@ -5855,7 +6049,8 @@ ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr, * operation. Trap this case to avoid exhausting the [very limited] * set of PF resources used to store VF MAC addresses. 
*/ - if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0) + if (memcmp(hw->mac.perm_addr, mac_addr, + sizeof(struct rte_ether_addr)) == 0) return -1; diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes); if (diag != 0) @@ -5875,8 +6070,9 @@ static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr; - struct ether_addr *mac_addr; + struct rte_ether_addr *perm_addr = + (struct rte_ether_addr *)hw->mac.perm_addr; + struct rte_ether_addr *mac_addr; uint32_t i; int diag; @@ -5898,10 +6094,11 @@ ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index) if (i == index) continue; /* Skip NULL MAC addresses */ - if (is_zero_ether_addr(mac_addr)) + if (rte_is_zero_ether_addr(mac_addr)) continue; /* Skip the permanent MAC address */ - if (memcmp(perm_addr, mac_addr, sizeof(struct ether_addr)) == 0) + if (memcmp(perm_addr, mac_addr, + sizeof(struct rte_ether_addr)) == 0) continue; diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes); if (diag != 0) @@ -5919,12 +6116,15 @@ ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index) } } -static void -ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr) +static int +ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, + struct rte_ether_addr *addr) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0); + + return 0; } int @@ -6163,21 +6363,24 @@ static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) { struct ixgbe_hw *hw; - uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; - struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode; + uint32_t max_frame = mtu + IXGBE_ETH_OVERHEAD; + struct rte_eth_dev_data *dev_data = dev->data; hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN)) + if (mtu < RTE_ETHER_MIN_MTU || + max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN) return -EINVAL; - /* refuse mtu that requires the support of scattered packets when this - * feature has not been enabled before. + /* If device is started, refuse mtu that requires the support of + * scattered packets when this feature has not been enabled before. 
*/ - if (!rx_conf->enable_scatter && + if (dev_data->dev_started && !dev_data->scattered_rx && (max_frame + 2 * IXGBE_VLAN_TAG_SIZE > - dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) + dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) { + PMD_INIT_LOG(ERR, "Stop port first."); return -EINVAL; + } /* * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU @@ -6463,8 +6666,8 @@ ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) return -EINVAL; - if (filter->ether_type == ETHER_TYPE_IPv4 || - filter->ether_type == ETHER_TYPE_IPv6) { + if (filter->ether_type == RTE_ETHER_TYPE_IPv4 || + filter->ether_type == RTE_ETHER_TYPE_IPv6) { PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in" " ethertype filter.", filter->ether_type); return -EINVAL; @@ -6645,13 +6848,13 @@ ixgbe_dev_addr_list_itr(__attribute__((unused)) struct ixgbe_hw *hw, *vmdq = 0; mc_addr = *mc_addr_ptr; - *mc_addr_ptr = (mc_addr + sizeof(struct ether_addr)); + *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr)); return mc_addr; } static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev, - struct ether_addr *mc_addr_set, + struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr) { struct ixgbe_hw *hw; @@ -6872,7 +7075,7 @@ ixgbe_timesync_enable(struct rte_eth_dev *dev) /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), - (ETHER_TYPE_1588 | + (RTE_ETHER_TYPE_1588 | IXGBE_ETQF_FILTER_EN | IXGBE_ETQF_1588)); @@ -7101,6 +7304,78 @@ ixgbe_set_eeprom(struct rte_eth_dev *dev, return eeprom->ops.write_buffer(hw, first, length, data); } +static int +ixgbe_get_module_info(struct rte_eth_dev *dev, + struct rte_eth_dev_module_info *modinfo) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t status; + uint8_t sff8472_rev, addr_mode; + bool page_swap = false; + + /* Check whether we support SFF-8472 or not */ + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_SFF_8472_COMP, + &sff8472_rev); + if (status != 0) + return -EIO; + + /* addressing mode is not supported */ + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_SFF_8472_SWAP, + &addr_mode); + if (status != 0) + return -EIO; + + if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) { + PMD_DRV_LOG(ERR, + "Address change required to access page 0xA2, " + "but not supported. Please report the module " + "type to the driver maintainers."); + page_swap = true; + } + + if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) { + /* We have a SFP, but it does not support SFF-8472 */ + modinfo->type = RTE_ETH_MODULE_SFF_8079; + modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN; + } else { + /* We have a SFP which supports a revision of SFF-8472. 
*/ + modinfo->type = RTE_ETH_MODULE_SFF_8472; + modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; + } + + return 0; +} + +static int +ixgbe_get_module_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *info) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t status = IXGBE_ERR_PHY_ADDR_INVALID; + uint8_t databyte = 0xFF; + uint8_t *data = info->data; + uint32_t i = 0; + + if (info->length == 0) + return -EINVAL; + + for (i = info->offset; i < info->offset + info->length; i++) { + if (i < RTE_ETH_MODULE_SFF_8079_LEN) + status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte); + else + status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte); + + if (status != 0) + return -EIO; + + data[i - info->offset] = databyte; + } + + return 0; +} + uint16_t ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) { switch (mac_type) { @@ -8063,6 +8338,22 @@ ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, return ret; } +static void +ixgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_PROMISC); +} + +static void +ixgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE); +} + static void ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev) { @@ -8104,7 +8395,7 @@ ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev) struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ixgbe_interrupt *intr = IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); - ixgbevf_intr_disable(hw); + ixgbevf_intr_disable(dev); /* read-on-clear nic registers here */ eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR); @@ -8121,7 +8412,6 @@ ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev) static int ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev) { - struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ixgbe_interrupt *intr = IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); @@ -8130,7 +8420,7 @@ ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev) intr->flags &= ~IXGBE_FLAG_MAILBOX; } - ixgbevf_intr_enable(hw); + ixgbevf_intr_enable(dev); return 0; } @@ -8269,7 +8559,7 @@ ixgbe_rss_filter_restore(struct rte_eth_dev *dev) struct ixgbe_filter_info *filter_info = IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); - if (filter_info->rss_info.num) + if (filter_info->rss_info.conf.queue_num) ixgbe_config_rss_filter(dev, &filter_info->rss_info, TRUE); } @@ -8381,9 +8671,7 @@ RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd); RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map); RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci"); -RTE_INIT(ixgbe_init_log); -static void -ixgbe_init_log(void) +RTE_INIT(ixgbe_init_log) { ixgbe_logtype_init = rte_log_register("pmd.net.ixgbe.init"); if (ixgbe_logtype_init >= 0)