diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 5ecf12928a..3c7624f3a1 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -11,6 +11,7 @@
 #include <stdarg.h>
 #include <inttypes.h>
 #include <netinet/in.h>
+#include <rte_string_fns.h>
 #include <rte_byteorder.h>
 #include <rte_common.h>
 #include <rte_cycles.h>
@@ -22,6 +23,7 @@
 #include <rte_bus_pci.h>
 #include <rte_branch_prediction.h>
 #include <rte_memory.h>
+#include <rte_kvargs.h>
 #include <rte_eal.h>
 #include <rte_alarm.h>
 #include <rte_ether.h>
@@ -126,6 +128,13 @@
 #define IXGBE_EXVET_VET_EXT_SHIFT              16
 #define IXGBE_DMATXCTL_VT_MASK                 0xFFFF0000
 
+#define IXGBEVF_DEVARG_PFLINK_FULLCHK    "pflink_fullchk"
+
+static const char * const ixgbevf_valid_arguments[] = {
+    IXGBEVF_DEVARG_PFLINK_FULLCHK,
+    NULL
+};
+
 static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
 static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
 static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
@@ -140,10 +149,10 @@ static int ixgbe_dev_set_link_up(struct rte_eth_dev *dev);
 static int ixgbe_dev_set_link_down(struct rte_eth_dev *dev);
 static void ixgbe_dev_close(struct rte_eth_dev *dev);
 static int ixgbe_dev_reset(struct rte_eth_dev *dev);
-static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
-static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
-static void ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
-static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static int ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
+static int ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
+static int ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static int ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
 static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
     int wait_to_complete);
 static int ixgbe_dev_stats_get(struct rte_eth_dev *dev,
@@ -155,8 +164,8 @@ static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev,
 static int ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev,
     const uint64_t *ids,
     uint64_t *values, unsigned int n);
-static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
-static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
+static int ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
+static int ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
 static int ixgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
     struct rte_eth_xstat_name *xstats_names,
     unsigned int size);
@@ -173,11 +182,11 @@ static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
     uint8_t is_rx);
 static int ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
     size_t fw_size);
-static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
-    struct rte_eth_dev_info *dev_info);
+static int ixgbe_dev_info_get(struct rte_eth_dev *dev,
+    struct rte_eth_dev_info *dev_info);
 static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
-static void ixgbevf_dev_info_get(struct rte_eth_dev *dev,
-    struct rte_eth_dev_info *dev_info);
+static int ixgbevf_dev_info_get(struct rte_eth_dev *dev,
+    struct rte_eth_dev_info *dev_info);
 static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
 static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
     uint16_t vlan_id, int on);
@@ -189,6 +198,9 @@ static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
     uint16_t queue, bool on);
 static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
     int on);
+static void ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev,
+    int mask);
+static int ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask);
 static int ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
 static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
 static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
@@ -214,15 +226,17 @@ static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
 static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
 static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
-static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
-    struct rte_intr_handle *handle);
+static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
 static void ixgbe_dev_interrupt_handler(void *param);
 static void ixgbe_dev_interrupt_delayed_handler(void *param);
-static int ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
-    uint32_t index, uint32_t pool);
+static void ixgbe_dev_setup_link_alarm_handler(void *param);
+
+static int ixgbe_add_rar(struct rte_eth_dev *dev,
+    struct rte_ether_addr *mac_addr,
+    uint32_t index, uint32_t pool);
 static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
 static int ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
-    struct ether_addr *mac_addr);
+    struct rte_ether_addr *mac_addr);
 static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
 static bool is_device_supported(struct rte_eth_dev *dev,
     struct rte_pci_driver *drv);
@@ -241,11 +255,12 @@ static void ixgbevf_intr_disable(struct rte_eth_dev *dev);
 static void ixgbevf_intr_enable(struct rte_eth_dev *dev);
 static int ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
     struct rte_eth_stats *stats);
-static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
+static int ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
 static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
     uint16_t vlan_id, int on);
 static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
     uint16_t queue, int on);
+static int ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask);
 static int ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
 static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
@@ -255,12 +270,14 @@ static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
 static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
     uint8_t queue, uint8_t msix_vector);
 static void ixgbevf_configure_msix(struct rte_eth_dev *dev);
-static void ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev);
-static void ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static int ixgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev);
+static int ixgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev);
+static int ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static int ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev);
 
 /* For Eth VMDQ APIs support */
 static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
-    ether_addr * mac_addr, uint8_t on);
+    rte_ether_addr * mac_addr, uint8_t on);
 static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
 static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
     struct rte_eth_mirror_conf *mirror_conf,
@@ -276,11 +293,11 @@ static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
 static void ixgbe_configure_msix(struct rte_eth_dev *dev);
 
 static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
-    struct ether_addr *mac_addr,
+    struct rte_ether_addr *mac_addr,
     uint32_t index, uint32_t pool);
 static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
 static int ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
-    struct ether_addr *mac_addr);
+    struct rte_ether_addr *mac_addr);
 static int ixgbe_syn_filter_get(struct rte_eth_dev *dev,
     struct rte_eth_syn_filter *filter);
 static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
@@ -307,7 +324,7 @@ static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
 static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
 static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
-    struct ether_addr *mc_addr_set,
+    struct rte_ether_addr *mc_addr_set,
     uint32_t nb_mc_addr);
 static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
     struct rte_eth_dcb_info *dcb_info);
@@ -402,6 +419,16 @@ static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
 int ixgbe_logtype_init;
 int ixgbe_logtype_driver;
 
+#ifdef RTE_LIBRTE_IXGBE_DEBUG_RX
+int ixgbe_logtype_rx;
+#endif
+#ifdef RTE_LIBRTE_IXGBE_DEBUG_TX
+int ixgbe_logtype_tx;
+#endif
+#ifdef RTE_LIBRTE_IXGBE_DEBUG_TX_FREE
+int ixgbe_logtype_tx_free;
+#endif
+
 /*
  * The set of PCI devices this driver supports
  */
@@ -433,7 +460,6 @@ static const struct rte_pci_id pci_id_ixgbe_map[] = {
     { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP) },
     { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM) },
     { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM) },
-    { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS) },
     { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T) },
     { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1) },
     { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP) },
@@ -454,6 +480,7 @@ static const struct rte_pci_id pci_id_ixgbe_map[] = {
     { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) },
     { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) },
     { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR) },
+    { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI) },
 #ifdef RTE_LIBRTE_IXGBE_BYPASS
     { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS) },
 #endif
@@ -592,6 +619,8 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
     .xstats_get_names     = ixgbevf_dev_xstats_get_names,
     .dev_close            = ixgbevf_dev_close,
     .dev_reset            = ixgbevf_dev_reset,
+    .promiscuous_enable   = ixgbevf_dev_promiscuous_enable,
+    .promiscuous_disable  = ixgbevf_dev_promiscuous_disable,
     .allmulticast_enable  = ixgbevf_dev_allmulticast_enable,
     .allmulticast_disable = ixgbevf_dev_allmulticast_disable,
     .dev_infos_get        = ixgbevf_dev_info_get,
@@ -1115,6 +1144,14 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
         return -EIO;
     }
 
+    if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) {
+        PMD_INIT_LOG(ERR, "\nERROR: "
+            "Firmware recovery mode detected. Limiting functionality.\n"
+            "Refer to the Intel(R) Ethernet Adapters and Devices "
+            "User Guide for details on firmware recovery mode.");
+        return -EIO;
+    }
+
     /* pick up the PCI bus settings for reporting later */
     ixgbe_get_bus_info(hw);
@@ -1195,29 +1232,34 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
     ixgbe_reset_qstat_mappings(hw);
 
     /* Allocate memory for storing MAC addresses */
-    eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
+    eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", RTE_ETHER_ADDR_LEN *
         hw->mac.num_rar_entries, 0);
     if (eth_dev->data->mac_addrs == NULL) {
         PMD_INIT_LOG(ERR,
             "Failed to allocate %u bytes needed to store "
             "MAC addresses",
-            ETHER_ADDR_LEN * hw->mac.num_rar_entries);
+            RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
         return -ENOMEM;
     }
     /* Copy the permanent MAC address */
-    ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
+    rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
         &eth_dev->data->mac_addrs[0]);
 
     /* Allocate memory for storing hash filter MAC addresses */
-    eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
-        IXGBE_VMDQ_NUM_UC_MAC, 0);
+    eth_dev->data->hash_mac_addrs = rte_zmalloc(
+        "ixgbe", RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC, 0);
     if (eth_dev->data->hash_mac_addrs == NULL) {
         PMD_INIT_LOG(ERR,
             "Failed to allocate %d bytes needed to store MAC addresses",
-            ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
+            RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
         return -ENOMEM;
     }
 
+    /* Pass the information to the rte_eth_dev_close() that it should also
+     * release the private port resources.
+     */
+    eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
+
     /* initialize the vfta */
     memset(shadow_vfta, 0, sizeof(*shadow_vfta));
@@ -1284,73 +1326,12 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
 static int
 eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
 {
-    struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
-    struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
-    struct ixgbe_hw *hw;
-    int retries = 0;
-    int ret;
-
     PMD_INIT_FUNC_TRACE();
 
     if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-        return -EPERM;
-
-    hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
-
-    if (hw->adapter_stopped == 0)
-        ixgbe_dev_close(eth_dev);
-
-    eth_dev->dev_ops = NULL;
-    eth_dev->rx_pkt_burst = NULL;
-    eth_dev->tx_pkt_burst = NULL;
-
-    /* Unlock any pending hardware semaphore */
-    ixgbe_swfw_lock_reset(hw);
-
-    /* disable uio intr before callback unregister */
-    rte_intr_disable(intr_handle);
-
-    do {
-        ret = rte_intr_callback_unregister(intr_handle,
-                ixgbe_dev_interrupt_handler, eth_dev);
-        if (ret >= 0) {
-            break;
-        } else if (ret != -EAGAIN) {
-            PMD_INIT_LOG(ERR,
-                "intr callback unregister failed: %d",
-                ret);
-            return ret;
-        }
-        rte_delay_ms(100);
-    } while (retries++ < (10 + IXGBE_LINK_UP_TIME));
-
-    /* uninitialize PF if max_vfs not zero */
-    ixgbe_pf_host_uninit(eth_dev);
-
-    rte_free(eth_dev->data->mac_addrs);
-    eth_dev->data->mac_addrs = NULL;
-
-    rte_free(eth_dev->data->hash_mac_addrs);
-    eth_dev->data->hash_mac_addrs = NULL;
-
-    /* remove all the fdir filters & hash */
-    ixgbe_fdir_filter_uninit(eth_dev);
-
-    /* remove all the L2 tunnel filters & hash */
-    ixgbe_l2_tn_filter_uninit(eth_dev);
-
-    /* Remove all ntuple filters of the device */
-    ixgbe_ntuple_filter_uninit(eth_dev);
-
-    /* clear all the filters list */
-    ixgbe_filterlist_flush();
+        return 0;
 
-    /* Remove all Traffic Manager configuration */
-    ixgbe_tm_conf_uninit(eth_dev);
-
-#ifdef RTE_LIBRTE_SECURITY
-    rte_free(eth_dev->security_ctx);
-#endif
+    ixgbe_dev_close(eth_dev);
 
     return 0;
 }
@@ -1484,7 +1465,7 @@ static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
     }
     l2_tn_info->e_tag_en = FALSE;
     l2_tn_info->e_tag_fwd_en = FALSE;
-    l2_tn_info->e_tag_ether_type = ETHER_TYPE_ETAG;
+    l2_tn_info->e_tag_ether_type = RTE_ETHER_TYPE_ETAG;
 
     return 0;
 }
@@ -1502,6 +1483,7 @@ ixgbevf_negotiate_api(struct ixgbe_hw *hw)
 
     /* start with highest supported, proceed down */
     static const enum ixgbe_pfvf_api_rev sup_ver[] = {
+        ixgbe_mbox_api_13,
         ixgbe_mbox_api_12,
         ixgbe_mbox_api_11,
         ixgbe_mbox_api_10,
@@ -1515,7 +1497,7 @@ ixgbevf_negotiate_api(struct ixgbe_hw *hw)
 }
 
 static void
-generate_random_mac_addr(struct ether_addr *mac_addr)
+generate_random_mac_addr(struct rte_ether_addr *mac_addr)
 {
     uint64_t random;
@@ -1524,12 +1506,51 @@ generate_random_mac_addr(struct ether_addr *mac_addr)
     mac_addr->addr_bytes[1] = 0x09;
     mac_addr->addr_bytes[2] = 0xC0;
     /* Force indication of locally assigned MAC address. */
-    mac_addr->addr_bytes[0] |= ETHER_LOCAL_ADMIN_ADDR;
+    mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR;
     /* Generate the last 3 bytes of the MAC address with a random number. */
     random = rte_rand();
     memcpy(&mac_addr->addr_bytes[3], &random, 3);
 }
 
+static int
+devarg_handle_int(__rte_unused const char *key, const char *value,
+    void *extra_args)
+{
+    uint16_t *n = extra_args;
+
+    if (value == NULL || extra_args == NULL)
+        return -EINVAL;
+
+    *n = (uint16_t)strtoul(value, NULL, 0);
+    if (*n == USHRT_MAX && errno == ERANGE)
+        return -1;
+
+    return 0;
+}
+
+static void
+ixgbevf_parse_devargs(struct ixgbe_adapter *adapter,
+    struct rte_devargs *devargs)
+{
+    struct rte_kvargs *kvlist;
+    uint16_t pflink_fullchk;
+
+    if (devargs == NULL)
+        return;
+
+    kvlist = rte_kvargs_parse(devargs->args, ixgbevf_valid_arguments);
+    if (kvlist == NULL)
+        return;
+
+    if (rte_kvargs_count(kvlist, IXGBEVF_DEVARG_PFLINK_FULLCHK) == 1 &&
+        rte_kvargs_process(kvlist, IXGBEVF_DEVARG_PFLINK_FULLCHK,
+            devarg_handle_int, &pflink_fullchk) == 0 &&
+        pflink_fullchk == 1)
+        adapter->pflink_fullchk = 1;
+
+    rte_kvargs_free(kvlist);
+}
+
 /*
  * Virtual Function device init
  */
@@ -1546,7 +1567,8 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
         IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
     struct ixgbe_hwstrip *hwstrip =
         IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
-    struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
+    struct rte_ether_addr *perm_addr =
+        (struct rte_ether_addr *)hw->mac.perm_addr;
 
     PMD_INIT_FUNC_TRACE();
@@ -1577,6 +1599,9 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
         return 0;
     }
 
+    ixgbevf_parse_devargs(eth_dev->data->dev_private,
+        pci_dev->device.devargs);
+
     rte_eth_copy_pci_info(eth_dev, pci_dev);
 
     hw->device_id = pci_dev->id.device_id;
@@ -1615,7 +1640,12 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
      */
     if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) {
         PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
-        return diag;
+        /*
+         * This error code will be propagated to the app by
+         * rte_eth_dev_reset, so use a public error code rather than
+         * the internal-only IXGBE_ERR_RESET_FAILED
+         */
+        return -EAGAIN;
     }
 
     /* negotiate mailbox API version to use with the PF. */
@@ -1625,18 +1655,23 @@
     ixgbevf_get_queues(hw, &tcs, &tc);
 
     /* Allocate memory for storing MAC addresses */
-    eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
+    eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", RTE_ETHER_ADDR_LEN *
         hw->mac.num_rar_entries, 0);
     if (eth_dev->data->mac_addrs == NULL) {
         PMD_INIT_LOG(ERR,
             "Failed to allocate %u bytes needed to store "
             "MAC addresses",
-            ETHER_ADDR_LEN * hw->mac.num_rar_entries);
+            RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
         return -ENOMEM;
     }
 
+    /* Pass the information to the rte_eth_dev_close() that it should also
+     * release the private port resources.
+     */
+    eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
+
     /* Generate a random MAC address, if none was assigned by PF. */
-    if (is_zero_ether_addr(perm_addr)) {
+    if (rte_is_zero_ether_addr(perm_addr)) {
         generate_random_mac_addr(perm_addr);
         diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1);
         if (diag) {
@@ -1656,7 +1691,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
     }
 
     /* Copy the permanent MAC address */
-    ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]);
+    rte_ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]);
 
     /* reset the hardware with the new settings */
     diag = hw->mac.ops.start_hw(hw);
@@ -1686,33 +1721,12 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
 static int
 eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
 {
-    struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
-    struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
-    struct ixgbe_hw *hw;
-
     PMD_INIT_FUNC_TRACE();
 
     if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-        return -EPERM;
-
-    hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
-
-    if (hw->adapter_stopped == 0)
-        ixgbevf_dev_close(eth_dev);
-
-    eth_dev->dev_ops = NULL;
-    eth_dev->rx_pkt_burst = NULL;
-    eth_dev->tx_pkt_burst = NULL;
-
-    /* Disable the interrupts for VF */
-    ixgbevf_intr_disable(eth_dev);
-
-    rte_free(eth_dev->data->mac_addrs);
-    eth_dev->data->mac_addrs = NULL;
+        return 0;
 
-    rte_intr_disable(intr_handle);
-    rte_intr_callback_unregister(intr_handle,
-        ixgbevf_dev_interrupt_handler, eth_dev);
+    ixgbevf_dev_close(eth_dev);
 
     return 0;
 }
@@ -1796,8 +1810,7 @@ static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev)
 
 static struct rte_pci_driver rte_ixgbe_pmd = {
     .id_table = pci_id_ixgbe_map,
-    .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
-        RTE_PCI_DRV_IOVA_AS_VA,
+    .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
     .probe = eth_ixgbe_pci_probe,
     .remove = eth_ixgbe_pci_remove,
 };
@@ -1819,7 +1832,7 @@ static int eth_ixgbevf_pci_remove(struct rte_pci_device *pci_dev)
  */
 static struct rte_pci_driver rte_ixgbevf_pmd = {
     .id_table = pci_id_ixgbevf_map,
-    .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
+    .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
     .probe = eth_ixgbevf_pci_probe,
     .remove = eth_ixgbevf_pci_remove,
 };
@@ -1974,10 +1987,13 @@ ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
 
     rxq = dev->data->rx_queues[queue];
 
-    if (on)
+    if (on) {
         rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
-    else
+        rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+    } else {
         rxq->vlan_flags = PKT_RX_VLAN;
+        rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+    }
 }
 
 static void
@@ -2129,8 +2145,30 @@ ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
     }
 }
 
+static void
+ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
+{
+    uint16_t i;
+    struct rte_eth_rxmode *rxmode;
+    struct ixgbe_rx_queue *rxq;
+
+    if (mask & ETH_VLAN_STRIP_MASK) {
+        rxmode = &dev->data->dev_conf.rxmode;
+        if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+            for (i = 0; i < dev->data->nb_rx_queues; i++) {
+                rxq = dev->data->rx_queues[i];
+                rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+            }
+        else
+            for (i = 0; i < dev->data->nb_rx_queues; i++) {
+                rxq = dev->data->rx_queues[i];
+                rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+            }
+    }
+}
+
 static int
-ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
 {
     struct rte_eth_rxmode *rxmode;
     rxmode = &dev->data->dev_conf.rxmode;
@@ -2156,6 +2194,16 @@ ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
     return 0;
 }
 
+static int
+ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+    ixgbe_config_vlan_strip_on_all_queues(dev, mask);
+
+    ixgbe_vlan_offload_config(dev, mask);
+
+    return 0;
+}
+
 static void
 ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
 {
@@ -2350,8 +2398,7 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
 {
     struct ixgbe_interrupt *intr =
         IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
-    struct ixgbe_adapter *adapter =
-        (struct ixgbe_adapter *)dev->data->dev_private;
+    struct ixgbe_adapter *adapter = dev->data->dev_private;
     int ret;
 
     PMD_INIT_FUNC_TRACE();
@@ -2408,9 +2455,12 @@ ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
     uint32_t queue_end;
     uint16_t total_rate = 0;
     struct rte_pci_device *pci_dev;
+    int ret;
 
     pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-    rte_eth_link_get_nowait(dev->data->port_id, &link);
+    ret = rte_eth_link_get_nowait(dev->data->port_id, &link);
+    if (ret < 0)
+        return ret;
 
     if (vf >= pci_dev->max_vfs)
         return -EINVAL;
@@ -2506,6 +2556,9 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
         return -EINVAL;
     }
 
+    /* Stop the link setup handler before resetting the HW. */
+    rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev);
+
     /* disable uio/vfio intr/eventfd mapping */
     rte_intr_disable(intr_handle);
@@ -2567,7 +2620,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 
     mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
         ETH_VLAN_EXTEND_MASK;
-    err = ixgbe_vlan_offload_set(dev, mask);
+    err = ixgbe_vlan_offload_config(dev, mask);
     if (err) {
         PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
         goto error;
@@ -2606,10 +2659,16 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
         goto error;
     }
 
-    /* Skip link setup if loopback mode is enabled for 82599. */
-    if (hw->mac.type == ixgbe_mac_82599EB &&
-            dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
-        goto skip_link_setup;
+    /* Skip link setup if loopback mode is enabled. */
+    if (dev->data->dev_conf.lpbk_mode != 0) {
+        err = ixgbe_check_supported_loopback_mode(dev);
+        if (err < 0) {
+            PMD_INIT_LOG(ERR, "Unsupported loopback mode");
+            goto error;
+        } else {
+            goto skip_link_setup;
+        }
+    }
 
     if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
         err = hw->mac.ops.setup_sfp(hw);
@@ -2641,6 +2700,10 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
         allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
             ETH_LINK_SPEED_2_5G | ETH_LINK_SPEED_5G |
             ETH_LINK_SPEED_10G;
+        if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
+                hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
+            allowed_speeds = ETH_LINK_SPEED_10M |
+                ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G;
         break;
     default:
         allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
@@ -2682,14 +2745,14 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
             speed |= IXGBE_LINK_SPEED_1GB_FULL;
         if (*link_speeds & ETH_LINK_SPEED_100M)
             speed |= IXGBE_LINK_SPEED_100_FULL;
+        if (*link_speeds & ETH_LINK_SPEED_10M)
+            speed |= IXGBE_LINK_SPEED_10_FULL;
     }
 
     err = ixgbe_setup_link(hw, speed, link_up);
     if (err)
         goto error;
 
-    ixgbe_dev_link_update(dev, 0);
-
 skip_link_setup:
 
     if (rte_intr_allow_others(intr_handle)) {
@@ -2725,6 +2788,12 @@ skip_link_setup:
             "please call hierarchy_commit() "
             "before starting the port");
 
+    /*
+     * Update link status right before return, because it may
+     * start link configuration process in a separate thread.
+     */
+    ixgbe_dev_link_update(dev, 0);
+
     return 0;
 
 error:
@@ -2740,6 +2809,7 @@ static void
 ixgbe_dev_stop(struct rte_eth_dev *dev)
 {
     struct rte_eth_link link;
+    struct ixgbe_adapter *adapter = dev->data->dev_private;
     struct ixgbe_hw *hw =
         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
     struct ixgbe_vf_info *vfinfo =
@@ -2750,8 +2820,13 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
     struct ixgbe_tm_conf *tm_conf =
         IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
 
+    if (hw->adapter_stopped)
+        return;
+
     PMD_INIT_FUNC_TRACE();
 
+    rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev);
+
     /* disable interrupts */
     ixgbe_disable_intr(hw);
@@ -2798,6 +2873,10 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
 
     /* reset hierarchy commit */
     tm_conf->committed = false;
+
+    adapter->rss_reta_updated = 0;
+
+    hw->adapter_stopped = true;
 }
 
 /*
@@ -2868,13 +2947,16 @@ ixgbe_dev_close(struct rte_eth_dev *dev)
 {
     struct ixgbe_hw *hw =
         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+    struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+    struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+    int retries = 0;
+    int ret;
 
     PMD_INIT_FUNC_TRACE();
 
     ixgbe_pf_reset_hw(hw);
 
     ixgbe_dev_stop(dev);
-    hw->adapter_stopped = 1;
 
     ixgbe_dev_free_queues(dev);
@@ -2882,6 +2964,55 @@ ixgbe_dev_close(struct rte_eth_dev *dev)
 
     /* reprogram the RAR[0] in case user changed it. */
     ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+
+    dev->dev_ops = NULL;
+    dev->rx_pkt_burst = NULL;
+    dev->tx_pkt_burst = NULL;
+
+    /* Unlock any pending hardware semaphore */
+    ixgbe_swfw_lock_reset(hw);
+
+    /* disable uio intr before callback unregister */
+    rte_intr_disable(intr_handle);
+
+    do {
+        ret = rte_intr_callback_unregister(intr_handle,
+                ixgbe_dev_interrupt_handler, dev);
+        if (ret >= 0) {
+            break;
+        } else if (ret != -EAGAIN) {
+            PMD_INIT_LOG(ERR,
+                "intr callback unregister failed: %d",
+                ret);
+        }
+        rte_delay_ms(100);
+    } while (retries++ < (10 + IXGBE_LINK_UP_TIME));
+
+    /* cancel the delay handler before remove dev */
+    rte_eal_alarm_cancel(ixgbe_dev_interrupt_delayed_handler, dev);
+
+    /* uninitialize PF if max_vfs not zero */
+    ixgbe_pf_host_uninit(dev);
+
+    /* remove all the fdir filters & hash */
+    ixgbe_fdir_filter_uninit(dev);
+
+    /* remove all the L2 tunnel filters & hash */
+    ixgbe_l2_tn_filter_uninit(dev);
+
+    /* Remove all ntuple filters of the device */
+    ixgbe_ntuple_filter_uninit(dev);
+
+    /* clear all the filters list */
+    ixgbe_filterlist_flush();
+
+    /* Remove all Traffic Manager configuration */
+    ixgbe_tm_conf_uninit(dev);
+
+#ifdef RTE_LIBRTE_SECURITY
+    rte_free(dev->security_ctx);
+#endif
+
 }
 
 /*
@@ -2973,7 +3104,7 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw,
         hw_stats->qbrc[i] +=
             ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32);
         if (crc_strip == 0)
-            hw_stats->qbrc[i] -= delta_qprc * ETHER_CRC_LEN;
+            hw_stats->qbrc[i] -= delta_qprc * RTE_ETHER_CRC_LEN;
 
         hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
         hw_stats->qbtc[i] +=
@@ -3018,12 +3149,12 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw,
     hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
 
     if (crc_strip == 0)
-        hw_stats->gorc -= delta_gprc * ETHER_CRC_LEN;
+        hw_stats->gorc -= delta_gprc * RTE_ETHER_CRC_LEN;
 
     uint64_t delta_gptc = IXGBE_READ_REG(hw, IXGBE_GPTC);
     hw_stats->gptc += delta_gptc;
-    hw_stats->gotc -= delta_gptc * ETHER_CRC_LEN;
-    hw_stats->tor -= (hw_stats->tpr - old_tpr) * ETHER_CRC_LEN;
+    hw_stats->gotc -= delta_gptc * RTE_ETHER_CRC_LEN;
+    hw_stats->tor -= (hw_stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN;
 
     /*
      * Workaround: mprc hardware is incorrectly counting
@@ -3053,7 +3184,7 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw,
     hw_stats->gptc -= total;
     hw_stats->mptc -= total;
     hw_stats->ptc64 -= total;
-    hw_stats->gotc -= total * ETHER_MIN_LEN;
+    hw_stats->gotc -= total * RTE_ETHER_MIN_LEN;
 
     hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
     hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
@@ -3081,9 +3212,18 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw,
     }
 
     /* Flow Director Stats registers */
-    hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
-    hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
-
+    if (hw->mac.type != ixgbe_mac_82598EB) {
+        hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
+        hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
+        hw_stats->fdirustat_add += IXGBE_READ_REG(hw,
+            IXGBE_FDIRUSTAT) & 0xFFFF;
+        hw_stats->fdirustat_remove += (IXGBE_READ_REG(hw,
+            IXGBE_FDIRUSTAT) >> 16) & 0xFFFF;
+        hw_stats->fdirfstat_fadd += IXGBE_READ_REG(hw,
+            IXGBE_FDIRFSTAT) & 0xFFFF;
+        hw_stats->fdirfstat_fremove += (IXGBE_READ_REG(hw,
+            IXGBE_FDIRFSTAT) >> 16) & 0xFFFF;
+    }
     /* MACsec Stats registers */
     macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT);
     macsec_stats->out_pkts_encrypted +=
@@ -3178,7 +3318,7 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
     return 0;
 }
 
-static void
+static int
 ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
 {
     struct ixgbe_hw_stats *stats =
@@ -3189,6 +3329,8 @@ ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
 
     /* Reset software totals */
     memset(stats, 0, sizeof(*stats));
+
+    return 0;
 }
 
 /* This function calculates the number of xstats based on the current config */
@@ -3214,19 +3356,17 @@ static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 
         /* Extended stats from ixgbe_hw_stats */
         for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
-            snprintf(xstats_names[count].name,
-                sizeof(xstats_names[count].name),
-                "%s",
-                rte_ixgbe_stats_strings[i].name);
+            strlcpy(xstats_names[count].name,
+                rte_ixgbe_stats_strings[i].name,
+                sizeof(xstats_names[count].name));
             count++;
         }
 
         /* MACsec Stats */
         for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
-            snprintf(xstats_names[count].name,
-                sizeof(xstats_names[count].name),
-                "%s",
-                rte_ixgbe_macsec_strings[i].name);
+            strlcpy(xstats_names[count].name,
+                rte_ixgbe_macsec_strings[i].name,
+                sizeof(xstats_names[count].name));
             count++;
         }
@@ -3274,19 +3414,17 @@ static int ixgbe_dev_xstats_get_names_by_id(
 
         /* Extended stats from ixgbe_hw_stats */
         for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
-            snprintf(xstats_names[count].name,
-                sizeof(xstats_names[count].name),
-                "%s",
-                rte_ixgbe_stats_strings[i].name);
+            strlcpy(xstats_names[count].name,
+                rte_ixgbe_stats_strings[i].name,
+                sizeof(xstats_names[count].name));
             count++;
         }
 
         /* MACsec Stats */
         for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
-            snprintf(xstats_names[count].name,
-                sizeof(xstats_names[count].name),
-                "%s",
-                rte_ixgbe_macsec_strings[i].name);
+            strlcpy(xstats_names[count].name,
+                rte_ixgbe_macsec_strings[i].name,
+                sizeof(xstats_names[count].name));
             count++;
         }
@@ -3343,9 +3481,9 @@ static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 
     if (xstats_names != NULL)
         for (i = 0; i < IXGBEVF_NB_XSTATS; i++)
-            snprintf(xstats_names[i].name,
-                sizeof(xstats_names[i].name),
-                "%s", rte_ixgbevf_stats_strings[i].name);
+            strlcpy(xstats_names[i].name,
+                rte_ixgbevf_stats_strings[i].name,
+                sizeof(xstats_names[i].name));
     return IXGBEVF_NB_XSTATS;
 }
@@ -3514,7 +3652,7 @@ ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
     return n;
 }
 
-static void
+static int
 ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
 {
     struct ixgbe_hw_stats *stats =
@@ -3531,6 +3669,8 @@ ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
     /* Reset software totals */
     memset(stats, 0, sizeof(*stats));
     memset(macsec_stats, 0, sizeof(*macsec_stats));
+
+    return 0;
 }
 
 static void
@@ -3605,7 +3745,7 @@ ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
     return 0;
 }
 
-static void
+static int
 ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
 {
     struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
@@ -3619,6 +3759,8 @@ ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
     hw_stats->vfgorc = 0;
     hw_stats->vfgptc = 0;
     hw_stats->vfgotc = 0;
+
+    return 0;
 }
 
 static int
@@ -3642,7 +3784,7 @@ ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
     return 0;
 }
 
-static void
+static int
 ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
     struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
@@ -3669,6 +3811,8 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
         dev_info->max_vmdq_pools = ETH_16_POOLS;
     else
         dev_info->max_vmdq_pools = ETH_64_POOLS;
+    dev_info->max_mtu = dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD;
+    dev_info->min_mtu = RTE_ETHER_MIN_MTU;
     dev_info->vmdq_queue_num = dev_info->max_rx_queues;
     dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
     dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
@@ -3706,6 +3850,11 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
     dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
 
     dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
+    if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
+            hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
+        dev_info->speed_capa = ETH_LINK_SPEED_10M |
+            ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G;
+
     if (hw->mac.type == ixgbe_mac_X540 ||
         hw->mac.type == ixgbe_mac_X540_vf ||
         hw->mac.type == ixgbe_mac_X550 ||
@@ -3716,6 +3865,16 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
         dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
         dev_info->speed_capa |= ETH_LINK_SPEED_5G;
     }
+
+    /* Driver-preferred Rx/Tx parameters */
+    dev_info->default_rxportconf.burst_size = 32;
+    dev_info->default_txportconf.burst_size = 32;
+    dev_info->default_rxportconf.nb_queues = 1;
+    dev_info->default_txportconf.nb_queues = 1;
+    dev_info->default_rxportconf.ring_size = 256;
+    dev_info->default_txportconf.ring_size = 256;
+
+    return 0;
 }
 
 static const uint32_t *
@@ -3749,7 +3908,7 @@ ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
         dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc)
         return ptypes;
 
-#if defined(RTE_ARCH_X86)
+#if defined(RTE_ARCH_X86) || defined(RTE_MACHINE_CPUFLAG_NEON)
     if (dev->rx_pkt_burst == ixgbe_recv_pkts_vec ||
         dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec)
         return ptypes;
@@ -3757,7 +3916,7 @@ ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
     return NULL;
 }
 
-static void
+static int
 ixgbevf_dev_info_get(struct rte_eth_dev *dev,
     struct rte_eth_dev_info *dev_info)
 {
@@ -3768,6 +3927,7 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
     dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
     dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
     dev_info->max_rx_pktlen = 9728; /* includes CRC, cf MAXFRS reg */
+    dev_info->max_mtu = dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD;
     dev_info->max_mac_addrs = hw->mac.num_rar_entries;
     dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
     dev_info->max_vfs = pci_dev->max_vfs;
@@ -3780,6 +3940,9 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
         dev_info->rx_queue_offload_capa);
     dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev);
     dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev);
+    dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
+    dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
+    dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
 
     dev_info->default_rxconf = (struct rte_eth_rxconf) {
         .rx_thresh = {
@@ -3805,17 +3968,16 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
 
     dev_info->rx_desc_lim = rx_desc_lim;
     dev_info->tx_desc_lim = tx_desc_lim;
+
+    return 0;
 }
 
 static int
 ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
     int *link_up, int wait_to_complete)
 {
-    /**
-     * for a quick link status checking, wait_to_compelet == 0,
-     * skip PF link status checking
-     */
-    bool no_pflink_check = wait_to_complete == 0;
+    struct ixgbe_adapter *adapter = container_of(hw,
+        struct ixgbe_adapter, hw);
     struct ixgbe_mbx_info *mbx = &hw->mbx;
     struct ixgbe_mac_info *mac = &hw->mac;
     uint32_t links_reg, in_msg;
@@ -3876,7 +4038,7 @@ ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
         *speed = IXGBE_LINK_SPEED_UNKNOWN;
     }
 
-    if (no_pflink_check) {
+    if (wait_to_complete == 0 && adapter->pflink_fullchk == 0) {
         if (*speed == IXGBE_LINK_SPEED_UNKNOWN)
             mac->get_link_status = true;
         else
@@ -3884,6 +4046,7 @@ ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
         goto out;
     }
 
+
     /* if the read failed it could just be a mailbox collision, best wait
      * until we are called again and don't report an error
      */
@@ -3893,7 +4056,7 @@ ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
     if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
         /* msg is not CTS and is NACK we must have lost CTS status */
         if (in_msg & IXGBE_VT_MSGTYPE_NACK)
-            ret_val = -1;
+            mac->get_link_status = false;
         goto out;
     }
@@ -3913,6 +4076,25 @@ out:
     return ret_val;
 }
 
+static void
+ixgbe_dev_setup_link_alarm_handler(void *param)
+{
+    struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+    struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+    struct ixgbe_interrupt *intr =
+        IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+    u32 speed;
+    bool autoneg = false;
+
+    speed = hw->phy.autoneg_advertised;
+    if (!speed)
+        ixgbe_get_link_capabilities(hw, &speed, &autoneg);
+
+    ixgbe_setup_link(hw, speed, true);
+
+    intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
+}
+
 /* return 0 means link status changed, -1 means not changed */
 int
 ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
@@ -3925,9 +4107,7 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
         IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
     int link_up;
     int diag;
-    u32 speed = 0;
     int wait = 1;
-    bool autoneg = false;
 
     memset(&link, 0, sizeof(link));
     link.link_status = ETH_LINK_DOWN;
@@ -3937,13 +4117,8 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
 
     hw->mac.get_link_status = true;
 
-    if ((intr->flags & IXGBE_FLAG_NEED_LINK_CONFIG) &&
-        ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) {
-        speed = hw->phy.autoneg_advertised;
-        if (!speed)
-            ixgbe_get_link_capabilities(hw, &speed, &autoneg);
-        ixgbe_setup_link(hw, speed, true);
-    }
+    if (intr->flags & IXGBE_FLAG_NEED_LINK_CONFIG)
+        return rte_eth_linkstatus_set(dev, &link);
 
     /* check if it needs to wait to complete, if lsc interrupt is enabled */
     if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
@@ -3961,19 +4136,25 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
     }
 
     if (link_up == 0) {
-        intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
+        if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) {
+            intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
+            rte_eal_alarm_set(10,
+                ixgbe_dev_setup_link_alarm_handler, dev);
+        }
         return rte_eth_linkstatus_set(dev, &link);
     }
 
-    intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
     link.link_status = ETH_LINK_UP;
     link.link_duplex = ETH_LINK_FULL_DUPLEX;
 
     switch (link_speed) {
     default:
     case IXGBE_LINK_SPEED_UNKNOWN:
-        link.link_duplex = ETH_LINK_FULL_DUPLEX;
-        link.link_speed = ETH_SPEED_NUM_100M;
+        if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
+            hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
+            link.link_speed = ETH_SPEED_NUM_10M;
+        else
+            link.link_speed = ETH_SPEED_NUM_100M;
         break;
 
     case IXGBE_LINK_SPEED_100_FULL:
@@ -4012,7 +4193,7 @@ ixgbevf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
     return ixgbe_dev_link_update_share(dev, wait_to_complete, 1);
 }
 
-static void
+static int
 ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
 {
     struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -4021,9 +4202,11 @@ ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
     fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
     fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
     IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+
+    return 0;
 }
 
-static void
+static int
 ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
 {
     struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -4036,9 +4219,11 @@ ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
     else
         fctrl &= (~IXGBE_FCTRL_MPE);
     IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+
+    return 0;
 }
 
-static void
+static int
 ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
 {
     struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -4047,20 +4232,24 @@ ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
     fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
     fctrl |= IXGBE_FCTRL_MPE;
     IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+
+    return 0;
 }
 
-static void
+static int
 ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
 {
     struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
     uint32_t fctrl;
 
     if (dev->data->promiscuous == 1)
-        return; /* must remain in all_multicast mode */
+        return 0; /* must remain in all_multicast mode */
 
     fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
     fctrl &= (~IXGBE_FCTRL_MPE);
     IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+
+    return 0;
 }
 
 /**
@@ -4226,8 +4415,7 @@ ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
  *  - On failure, a negative value.
  */
 static int
-ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
-    struct rte_intr_handle *intr_handle)
+ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
 {
     struct ixgbe_interrupt *intr =
         IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
@@ -4278,7 +4466,6 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
 
     PMD_DRV_LOG(DEBUG, "enable intr immediately");
     ixgbe_enable_intr(dev);
-    rte_intr_enable(intr_handle);
 
     return 0;
 }
@@ -4340,7 +4527,7 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
 
     PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
     ixgbe_enable_intr(dev);
-    rte_intr_enable(intr_handle);
+    rte_intr_ack(intr_handle);
 }
 
 /**
@@ -4361,7 +4548,7 @@ ixgbe_dev_interrupt_handler(void *param)
     struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
 
     ixgbe_dev_interrupt_get_status(dev);
-    ixgbe_dev_interrupt_action(dev, dev->intr_handle);
+    ixgbe_dev_interrupt_action(dev);
 }
 
 static int
@@ -4456,7 +4643,8 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
      * At least reserve one Ethernet frame for watermark
      * high_water/low_water in kilo bytes for ixgbe
      */
-    max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
+    max_high_water = (rx_buf_size -
+        RTE_ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
     if ((fc_conf->high_water > max_high_water) ||
         (fc_conf->high_water < fc_conf->low_water)) {
         PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
@@ -4677,7 +4865,8 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p
      * At least reserve one Ethernet frame for watermark
      * high_water/low_water in kilo bytes for ixgbe
     */
-    max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
+    max_high_water = (rx_buf_size -
+        RTE_ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
     if ((pfc_conf->fc.high_water > max_high_water) ||
         (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
         PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
@@ -4710,6 +4899,7 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
     uint8_t j, mask;
     uint32_t reta, r;
     uint16_t idx, shift;
+    struct ixgbe_adapter *adapter = dev->data->dev_private;
     struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
     uint32_t reta_reg;
@@ -4751,6 +4941,7 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
         }
         IXGBE_WRITE_REG(hw, reta_reg, reta);
     }
+    adapter->rss_reta_updated = 1;
 
     return 0;
 }
@@ -4798,7 +4989,7 @@ ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
 }
 
 static int
-ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ixgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
     uint32_t index, uint32_t pool)
 {
     struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -4817,7 +5008,7 @@ ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
 }
 
 static int
-ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
+ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
 {
     struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
@@ -4849,13 +5040,16 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
     uint32_t maxfrs;
     struct ixgbe_hw *hw;
     struct rte_eth_dev_info dev_info;
-    uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+    uint32_t frame_size = mtu + IXGBE_ETH_OVERHEAD;
     struct rte_eth_dev_data *dev_data = dev->data;
+    int ret;
 
-    ixgbe_dev_info_get(dev, &dev_info);
+    ret = ixgbe_dev_info_get(dev, &dev_info);
+    if (ret != 0)
+        return ret;
 
     /* check that mtu is within the allowed range */
-    if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
+    if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
         return -EINVAL;
 
     /* If device is started, refuse mtu that requires the support of
@@ -4872,7 +5066,7 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
     hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
 
     /* switch to jumbo mode if needed */
-    if (frame_size > ETHER_MAX_LEN) {
+    if (frame_size > RTE_ETHER_MAX_LEN) {
         dev->data->dev_conf.rxmode.offloads |=
             DEV_RX_OFFLOAD_JUMBO_FRAME;
         hlreg0 |= IXGBE_HLREG0_JUMBOEN;
@@ -4941,8 +5135,7 @@ static int
 ixgbevf_dev_configure(struct rte_eth_dev *dev)
 {
     struct rte_eth_conf *conf = &dev->data->dev_conf;
-    struct ixgbe_adapter *adapter =
-        (struct ixgbe_adapter *)dev->data->dev_private;
+    struct ixgbe_adapter *adapter = dev->data->dev_private;
 
     PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
         dev->data->port_id);
@@ -4952,14 +5145,14 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
      * Keep the persistent behavior the same as Host PF
      */
 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
-    if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
+    if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
         PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
-        conf->rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
+        conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
     }
 #else
-    if (conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
+    if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) {
         PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
-        conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
+        conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
     }
 #endif
 
@@ -4986,6 +5179,9 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
 
     PMD_INIT_FUNC_TRACE();
 
+    /* Stop the link setup handler before resetting the HW. */
+    rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev);
+
     err = hw->mac.ops.reset_hw(hw);
     if (err) {
         PMD_INIT_LOG(ERR, "Unable to reset vf hardware (%d)", err);
@@ -5012,7 +5208,7 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
     /* Set HW strip */
     mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
         ETH_VLAN_EXTEND_MASK;
-    err = ixgbevf_vlan_offload_set(dev, mask);
+    err = ixgbevf_vlan_offload_config(dev, mask);
     if (err) {
         PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err);
         ixgbe_dev_clear_queues(dev);
@@ -5021,8 +5217,6 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
 
     ixgbevf_dev_rxtx_start(dev);
 
-    ixgbevf_dev_link_update(dev, 0);
-
     /* check and configure queue intr-vector mapping */
     if (rte_intr_cap_multiple(intr_handle) &&
         dev->data->dev_conf.intr_conf.rxq) {
@@ -5060,6 +5254,14 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
     /* Re-enable interrupt for VF */
     ixgbevf_intr_enable(dev);
 
+    /*
+     * Update link status right before return, because it may
+     * start link configuration process in a separate thread.
+     */
+    ixgbevf_dev_link_update(dev, 0);
+
+    hw->adapter_stopped = false;
+
     return 0;
 }
 
@@ -5067,11 +5269,17 @@ static void
 ixgbevf_dev_stop(struct rte_eth_dev *dev)
 {
     struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+    struct ixgbe_adapter *adapter = dev->data->dev_private;
     struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
     struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 
+    if (hw->adapter_stopped)
+        return;
+
     PMD_INIT_FUNC_TRACE();
 
+    rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev);
+
     ixgbevf_intr_disable(dev);
 
     hw->adapter_stopped = 1;
@@ -5094,12 +5302,16 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev)
         rte_free(intr_handle->intr_vec);
         intr_handle->intr_vec = NULL;
     }
+
+    adapter->rss_reta_updated = 0;
 }
 
 static void
 ixgbevf_dev_close(struct rte_eth_dev *dev)
 {
     struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+    struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+    struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 
     PMD_INIT_FUNC_TRACE();
 
@@ -5115,6 +5327,14 @@ ixgbevf_dev_close(struct rte_eth_dev *dev)
      * after stop, close and detach of the VF
      **/
     ixgbevf_remove_mac_addr(dev, 0);
+
+    dev->dev_ops = NULL;
+    dev->rx_pkt_burst = NULL;
+    dev->tx_pkt_burst = NULL;
+
+    rte_intr_disable(intr_handle);
+    rte_intr_callback_unregister(intr_handle,
+        ixgbevf_dev_interrupt_handler, dev);
 }
 
 /*
@@ -5210,7 +5430,7 @@ ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
 }
 
 static int
-ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
 {
     struct ixgbe_rx_queue *rxq;
     uint16_t i;
@@ -5228,6 +5448,16 @@ ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
     return 0;
 }
 
+static int
+ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+    ixgbe_config_vlan_strip_on_all_queues(dev, mask);
+
+    ixgbevf_vlan_offload_config(dev, mask);
+
+    return 0;
+}
+
 int
 ixgbe_vt_check(struct ixgbe_hw *hw)
 {
@@ -5244,7 +5474,7 @@ ixgbe_vt_check(struct ixgbe_hw *hw)
 }
 
 static uint32_t
-ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr *uc_addr)
+ixgbe_uta_vector(struct ixgbe_hw *hw, struct rte_ether_addr *uc_addr)
 {
     uint32_t vector = 0;
 
@@ -5275,8 +5505,8 @@ ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr *uc_addr)
 }
 
 static int
-ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
-    uint8_t on)
+ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,
+    struct rte_ether_addr *mac_addr, uint8_t on)
 {
     uint32_t vector;
     uint32_t uta_idx;
@@ -5576,7 +5806,7 @@ ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
     RTE_SET_USED(queue_id);
     IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask);
 
-    rte_intr_enable(intr_handle);
+    rte_intr_ack(intr_handle);
 
     return 0;
 }
@@ -5625,7 +5855,7 @@ ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
         mask &= (1 << (queue_id - 32));
         IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
     }
-    rte_intr_enable(intr_handle);
+    rte_intr_ack(intr_handle);
 
     return 0;
 }
@@ -5708,7 +5938,8 @@ ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
         IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp);
     } else if ((hw->mac.type == ixgbe_mac_82599EB) ||
             (hw->mac.type == ixgbe_mac_X540) ||
-            (hw->mac.type == ixgbe_mac_X550)) {
+            (hw->mac.type == ixgbe_mac_X550) ||
+            (hw->mac.type == ixgbe_mac_X550EM_x)) {
         if (direction == -1) {
             /* other causes */
             idx = ((queue & 1) * 8);
@@ -5838,6 +6069,7 @@ ixgbe_configure_msix(struct rte_eth_dev *dev)
         case ixgbe_mac_82599EB:
         case ixgbe_mac_X540:
         case ixgbe_mac_X550:
+        case ixgbe_mac_X550EM_x:
             ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
             break;
         default:
@@ -5907,7 +6139,7 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
 }
 
 static int
-ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
     __attribute__((unused)) uint32_t index,
     __attribute__((unused)) uint32_t pool)
 {
@@ -5919,7 +6151,8 @@ ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
      * operation. Trap this case to avoid exhausting the [very limited]
     * set of PF resources used to store VF MAC addresses.
     */
-    if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
+    if (memcmp(hw->mac.perm_addr, mac_addr,
+            sizeof(struct rte_ether_addr)) == 0)
         return -1;
     diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
     if (diag != 0)
@@ -5939,8 +6172,9 @@ static void
 ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
 {
     struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-    struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
-    struct ether_addr *mac_addr;
+    struct rte_ether_addr *perm_addr =
+        (struct rte_ether_addr *)hw->mac.perm_addr;
+    struct rte_ether_addr *mac_addr;
     uint32_t i;
     int diag;
 
@@ -5962,10 +6196,11 @@ ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
         if (i == index)
             continue;
         /* Skip NULL MAC addresses */
-        if (is_zero_ether_addr(mac_addr))
+        if (rte_is_zero_ether_addr(mac_addr))
             continue;
         /* Skip the permanent MAC address */
-        if (memcmp(perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
+        if (memcmp(perm_addr, mac_addr,
+                sizeof(struct rte_ether_addr)) == 0)
             continue;
         diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
         if (diag != 0)
@@ -5984,7 +6219,8 @@ ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
 }
 
 static int
-ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
+ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
+    struct rte_ether_addr *addr)
 {
     struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -6229,21 +6465,24 @@ static int
 ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 {
     struct ixgbe_hw *hw;
-    uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
-    struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
+    uint32_t max_frame = mtu + IXGBE_ETH_OVERHEAD;
+    struct rte_eth_dev_data *dev_data = dev->data;
 
     hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-    if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
+    if (mtu < RTE_ETHER_MIN_MTU ||
+            max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN)
         return -EINVAL;
 
-    /* refuse mtu that requires the support of scattered packets when this
-     * feature has not been enabled before.
+    /* If device is started, refuse mtu that requires the support of
+     * scattered packets when this feature has not been enabled before.
      */
-    if (!(rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) &&
+    if (dev_data->dev_started && !dev_data->scattered_rx &&
         (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
-         dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
+         dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
+        PMD_INIT_LOG(ERR, "Stop port first.");
         return -EINVAL;
+    }
 
     /*
      * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU
@@ -6529,8 +6768,8 @@ ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
     if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
         return -EINVAL;
 
-    if (filter->ether_type == ETHER_TYPE_IPv4 ||
-        filter->ether_type == ETHER_TYPE_IPv6) {
+    if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
+        filter->ether_type == RTE_ETHER_TYPE_IPV6) {
         PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
             " ethertype filter.", filter->ether_type);
         return -EINVAL;
@@ -6711,13 +6950,13 @@ ixgbe_dev_addr_list_itr(__attribute__((unused)) struct ixgbe_hw *hw,
 
     *vmdq = 0;
     mc_addr = *mc_addr_ptr;
-    *mc_addr_ptr = (mc_addr + sizeof(struct ether_addr));
+    *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
     return mc_addr;
 }
 
 static int
 ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
-    struct ether_addr *mc_addr_set,
+    struct rte_ether_addr *mc_addr_set,
     uint32_t nb_mc_addr)
 {
     struct ixgbe_hw *hw;
@@ -6807,8 +7046,7 @@ static void
 ixgbe_start_timecounters(struct rte_eth_dev *dev)
 {
     struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-    struct ixgbe_adapter *adapter =
-        (struct ixgbe_adapter *)dev->data->dev_private;
+    struct ixgbe_adapter *adapter = dev->data->dev_private;
     struct rte_eth_link link;
     uint32_t incval = 0;
     uint32_t shift = 0;
@@ -6876,8 +7114,7 @@ ixgbe_start_timecounters(struct rte_eth_dev *dev)
 static int
 ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
 {
-    struct ixgbe_adapter *adapter =
-        (struct ixgbe_adapter *)dev->data->dev_private;
+    struct ixgbe_adapter *adapter = dev->data->dev_private;
 
     adapter->systime_tc.nsec += delta;
     adapter->rx_tstamp_tc.nsec += delta;
@@ -6890,8 +7127,7 @@ static int
 ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
 {
     uint64_t ns;
-    struct ixgbe_adapter *adapter =
-        (struct ixgbe_adapter *)dev->data->dev_private;
+    struct ixgbe_adapter *adapter = dev->data->dev_private;
 
     ns = rte_timespec_to_ns(ts);
     /* Set the timecounters to a new value. */
@@ -6906,8 +7142,7 @@ static int
 ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
 {
     uint64_t ns, systime_cycles;
-    struct ixgbe_adapter *adapter =
-        (struct ixgbe_adapter *)dev->data->dev_private;
+    struct ixgbe_adapter *adapter = dev->data->dev_private;
 
     systime_cycles = ixgbe_read_systime_cyclecounter(dev);
     ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
@@ -6938,7 +7173,7 @@ ixgbe_timesync_enable(struct rte_eth_dev *dev)
 
     /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
     IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
-        (ETHER_TYPE_1588 |
+        (RTE_ETHER_TYPE_1588 |
          IXGBE_ETQF_FILTER_EN |
          IXGBE_ETQF_1588));
 
@@ -6988,8 +7223,7 @@ ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
     uint32_t flags __rte_unused)
 {
     struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-    struct ixgbe_adapter *adapter =
-        (struct ixgbe_adapter *)dev->data->dev_private;
+    struct ixgbe_adapter *adapter = dev->data->dev_private;
     uint32_t tsync_rxctl;
     uint64_t rx_tstamp_cycles;
     uint64_t ns;
@@ -7010,8 +7244,7 @@ ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
     struct timespec *timestamp)
 {
     struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-    struct ixgbe_adapter *adapter =
-        (struct ixgbe_adapter *)dev->data->dev_private;
+    struct ixgbe_adapter *adapter = dev->data->dev_private;
     uint32_t tsync_txctl;
     uint64_t tx_tstamp_cycles;
     uint64_t ns;
@@ -7250,6 +7483,9 @@ ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) {
     case ixgbe_mac_X550EM_x_vf:
     case ixgbe_mac_X550EM_a_vf:
         return ETH_RSS_RETA_SIZE_64;
+    case ixgbe_mac_X540_vf:
+    case ixgbe_mac_82599_vf:
+        return 0;
     default:
         return ETH_RSS_RETA_SIZE_128;
     }
@@ -8201,20 +8437,89 @@ ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
     return ret;
 }
 
-static void
+static int
+ixgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+    struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+    int ret;
+
+    switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_PROMISC)) {
+    case IXGBE_SUCCESS:
+        ret = 0;
+        break;
+    case IXGBE_ERR_FEATURE_NOT_SUPPORTED:
+        ret = -ENOTSUP;
+        break;
+    default:
+        ret = -EAGAIN;
+        break;
+    }
+
+    return ret;
+}
+
+static int
+ixgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+    struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+    int ret;
+
+    switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE)) {
+    case IXGBE_SUCCESS:
+        ret = 0;
+        break;
+    case IXGBE_ERR_FEATURE_NOT_SUPPORTED:
+        ret = -ENOTSUP;
+        break;
+    default:
+        ret = -EAGAIN;
+        break;
+    }
+
+    return ret;
+}
+
+static int
 ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev)
 {
     struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+    int ret;
+    int mode = IXGBEVF_XCAST_MODE_ALLMULTI;
+
+    switch (hw->mac.ops.update_xcast_mode(hw, mode)) {
+    case IXGBE_SUCCESS:
+        ret = 0;
+        break;
+    case IXGBE_ERR_FEATURE_NOT_SUPPORTED:
+        ret = -ENOTSUP;
+        break;
+    default:
+        ret = -EAGAIN;
+        break;
+    }
 
-    hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_ALLMULTI);
+    return ret;
 }
 
-static void
+static int
 ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev)
 {
     struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+    int ret;
+
+    switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI)) {
+    case IXGBE_SUCCESS:
+        ret = 0;
+        break;
+    case IXGBE_ERR_FEATURE_NOT_SUPPORTED:
+        ret = -ENOTSUP;
+        break;
+    default:
+        ret = -EAGAIN;
+        break;
+    }
 
-    hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI);
+    return ret;
 }
 
 static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
@@ -8517,10 +8822,10 @@ RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio-pci");
 RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci");
+RTE_PMD_REGISTER_PARAM_STRING(net_ixgbe_vf,
+    IXGBEVF_DEVARG_PFLINK_FULLCHK "=<0|1>");
 
-RTE_INIT(ixgbe_init_log);
-static void
-ixgbe_init_log(void)
+RTE_INIT(ixgbe_init_log)
 {
     ixgbe_logtype_init = rte_log_register("pmd.net.ixgbe.init");
     if (ixgbe_logtype_init >= 0)
@@ -8528,4 +8833,21 @@ ixgbe_init_log(void)
     ixgbe_logtype_driver = rte_log_register("pmd.net.ixgbe.driver");
     if (ixgbe_logtype_driver >= 0)
         rte_log_set_level(ixgbe_logtype_driver, RTE_LOG_NOTICE);
+#ifdef RTE_LIBRTE_IXGBE_DEBUG_RX
+    ixgbe_logtype_rx = rte_log_register("pmd.net.ixgbe.rx");
+    if (ixgbe_logtype_rx >= 0)
+        rte_log_set_level(ixgbe_logtype_rx, RTE_LOG_DEBUG);
+#endif
+
+#ifdef RTE_LIBRTE_IXGBE_DEBUG_TX
+    ixgbe_logtype_tx = rte_log_register("pmd.net.ixgbe.tx");
+    if (ixgbe_logtype_tx >= 0)
+        rte_log_set_level(ixgbe_logtype_tx, RTE_LOG_DEBUG);
+#endif
+
+#ifdef RTE_LIBRTE_IXGBE_DEBUG_TX_FREE
+    ixgbe_logtype_tx_free = rte_log_register("pmd.net.ixgbe.tx_free");
+    if (ixgbe_logtype_tx_free >= 0)
+        rte_log_set_level(ixgbe_logtype_tx_free, RTE_LOG_DEBUG);
+#endif
 }
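
Usage note: the diff registers a new ixgbe VF devarg, "pflink_fullchk" (see RTE_PMD_REGISTER_PARAM_STRING above, parsed by ixgbevf_parse_devargs()). When set to 1, ixgbevf_check_link() performs the full PF link check through the mailbox even for no-wait link queries, instead of only sampling the LINKS register. A minimal sketch of enabling it from an application follows; the PCI address and argument vector are illustrative assumptions, not taken from the diff.

    /* Hypothetical usage sketch: pass pflink_fullchk=1 as a device
     * argument on the VF's whitelist entry before EAL initialization.
     * The address 0000:05:00.1 is a placeholder for a real ixgbe VF. */
    #include <rte_eal.h>

    int main(void)
    {
        char *eal_argv[] = {
            "app",
            "-w", "0000:05:00.1,pflink_fullchk=1",
        };

        /* During probe, ixgbevf_parse_devargs() sees the kvarg and sets
         * adapter->pflink_fullchk = 1, which ixgbevf_check_link() consults. */
        if (rte_eal_init(3, eal_argv) < 0)
            return 1;
        return 0;
    }

The same devarg can be given on a testpmd command line of this DPDK era, e.g. "testpmd -w 0000:05:00.1,pflink_fullchk=1 -- -i".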