X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fi40e%2Fi40e_ethdev.c;h=a6b97e1649cdd830d99716968bb2d9fe212b87f5;hb=286a809c99c033f895c6b08c2af4b741f17ae29b;hp=9585c67394eddf87edf85d6d90f58a4e5726333b;hpb=a075ce2b3e8ccf8e60c0dd2aec4b88a04a744de2;p=dpdk.git diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c index 9585c67394..a6b97e1649 100644 --- a/drivers/net/i40e/i40e_ethdev.c +++ b/drivers/net/i40e/i40e_ethdev.c @@ -42,6 +42,9 @@ #define ETH_I40E_FLOATING_VEB_ARG "enable_floating_veb" #define ETH_I40E_FLOATING_VEB_LIST_ARG "floating_veb_list" +#define ETH_I40E_SUPPORT_MULTI_DRIVER "support-multi-driver" +#define ETH_I40E_QUEUE_NUM_PER_VF_ARG "queue-num-per-vf" +#define ETH_I40E_USE_LATEST_VEC "use-latest-supported-vec" #define I40E_CLEAR_PXE_WAIT_MS 200 @@ -290,6 +293,7 @@ static void i40e_stat_update_48(struct i40e_hw *hw, uint64_t *stat); static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue); static void i40e_dev_interrupt_handler(void *param); +static void i40e_dev_alarm_handler(void *param); static int i40e_res_pool_init(struct i40e_res_pool_info *pool, uint32_t base, uint32_t num); static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool); @@ -387,7 +391,7 @@ static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf, struct i40e_ethertype_filter *filter); static int i40e_tunnel_filter_convert( - struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter, + struct i40e_aqc_cloud_filters_element_bb *cld_filter, struct i40e_tunnel_filter *tunnel_filter); static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf, struct i40e_tunnel_filter *tunnel_filter); @@ -401,6 +405,14 @@ static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev); int i40e_logtype_init; int i40e_logtype_driver; +static const char *const valid_keys[] = { + ETH_I40E_FLOATING_VEB_ARG, + ETH_I40E_FLOATING_VEB_LIST_ARG, + ETH_I40E_SUPPORT_MULTI_DRIVER, + ETH_I40E_QUEUE_NUM_PER_VF_ARG, + ETH_I40E_USE_LATEST_VEC, + NULL}; + static const struct rte_pci_id pci_id_i40e_map[] = { { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) }, { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) }, @@ -852,7 +864,7 @@ config_vf_floating_veb(struct rte_devargs *devargs, if (devargs == NULL) return; - kvlist = rte_kvargs_parse(devargs->args, NULL); + kvlist = rte_kvargs_parse(devargs->args, valid_keys); if (kvlist == NULL) return; @@ -893,7 +905,7 @@ is_floating_veb_supported(struct rte_devargs *devargs) if (devargs == NULL) return 0; - kvlist = rte_kvargs_parse(devargs->args, NULL); + kvlist = rte_kvargs_parse(devargs->args, valid_keys); if (kvlist == NULL) return 0; @@ -1100,8 +1112,6 @@ i40e_init_queue_region_conf(struct rte_eth_dev *dev) memset(info, 0, sizeof(struct i40e_queue_regions)); } -#define ETH_I40E_SUPPORT_MULTI_DRIVER "support-multi-driver" - static int i40e_parse_multi_drv_handler(__rte_unused const char *key, const char *value, @@ -1133,9 +1143,8 @@ static int i40e_support_multi_driver(struct rte_eth_dev *dev) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); - static const char *const valid_keys[] = { - ETH_I40E_SUPPORT_MULTI_DRIVER, NULL}; struct rte_kvargs *kvlist; + int kvargs_count; /* Enable global configuration by default */ pf->support_multi_driver = false; @@ -1147,7 +1156,13 @@ i40e_support_multi_driver(struct rte_eth_dev *dev) if (!kvlist) return -EINVAL; - if (rte_kvargs_count(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER) > 1) + kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER); 
+ if (!kvargs_count) { + rte_kvargs_free(kvlist); + return 0; + } + + if (kvargs_count > 1) PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only " "the first invalid or last valid one is used !", ETH_I40E_SUPPORT_MULTI_DRIVER); @@ -1189,6 +1204,66 @@ i40e_aq_debug_write_global_register(struct i40e_hw *hw, return i40e_aq_debug_write_register(hw, reg_addr, reg_val, cmd_details); } +static int +i40e_parse_latest_vec_handler(__rte_unused const char *key, + const char *value, + void *opaque) +{ + struct i40e_adapter *ad; + int use_latest_vec; + + ad = (struct i40e_adapter *)opaque; + + use_latest_vec = atoi(value); + + if (use_latest_vec != 0 && use_latest_vec != 1) + PMD_DRV_LOG(WARNING, "Value should be 0 or 1, set it as 1!"); + + ad->use_latest_vec = (uint8_t)use_latest_vec; + + return 0; +} + +static int +i40e_use_latest_vec(struct rte_eth_dev *dev) +{ + struct i40e_adapter *ad = + I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct rte_kvargs *kvlist; + int kvargs_count; + + ad->use_latest_vec = false; + + if (!dev->device->devargs) + return 0; + + kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys); + if (!kvlist) + return -EINVAL; + + kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_USE_LATEST_VEC); + if (!kvargs_count) { + rte_kvargs_free(kvlist); + return 0; + } + + if (kvargs_count > 1) + PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only " + "the first invalid or last valid one is used !", + ETH_I40E_USE_LATEST_VEC); + + if (rte_kvargs_process(kvlist, ETH_I40E_USE_LATEST_VEC, + i40e_parse_latest_vec_handler, ad) < 0) { + rte_kvargs_free(kvlist); + return -EINVAL; + } + + rte_kvargs_free(kvlist); + return 0; +} + +#define I40E_ALARM_INTERVAL 50000 /* us */ + static int eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) { @@ -1198,7 +1273,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct i40e_vsi *vsi; int ret; - uint32_t len; + uint32_t len, val; uint8_t aq_fail = 0; PMD_INIT_FUNC_TRACE(); @@ -1241,16 +1316,32 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) hw->bus.device = pci_dev->addr.devid; hw->bus.func = pci_dev->addr.function; hw->adapter_stopped = 0; + hw->adapter_closed = 0; + + /* + * Switch Tag value should not be identical to either the First Tag + * or Second Tag values. So set something other than common Ethertype + * for internal switching. + */ + hw->switch_tag = 0xffff; + + val = I40E_READ_REG(hw, I40E_GL_FWSTS); + if (val & I40E_GL_FWSTS_FWS1B_MASK) { + PMD_INIT_LOG(ERR, "\nERROR: " + "Firmware recovery mode detected. 
Limiting functionality.\n" + "Refer to the Intel(R) Ethernet Adapters and Devices " + "User Guide for details on firmware recovery mode."); + return -EIO; + } /* Check if need to support multi-driver */ i40e_support_multi_driver(dev); + /* Check if users want the latest supported vec path */ + i40e_use_latest_vec(dev); /* Make sure all is clean before doing PF reset */ i40e_clear_hw(hw); - /* Initialize the hardware */ - i40e_hw_init(dev); - /* Reset here to make sure all is clean for each PF */ ret = i40e_pf_reset(hw); if (ret) { @@ -1265,6 +1356,23 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) return ret; } + /* Initialize the parameters for adminq */ + i40e_init_adminq_parameter(hw); + ret = i40e_init_adminq(hw); + if (ret != I40E_SUCCESS) { + PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret); + return -EIO; + } + PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x", + hw->aq.fw_maj_ver, hw->aq.fw_min_ver, + hw->aq.api_maj_ver, hw->aq.api_min_ver, + ((hw->nvm.version >> 12) & 0xf), + ((hw->nvm.version >> 4) & 0xff), + (hw->nvm.version & 0xf), hw->nvm.eetrack); + + /* Initialize the hardware */ + i40e_hw_init(dev); + i40e_config_automask(pf); i40e_set_default_pctype_table(dev); @@ -1280,20 +1388,6 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) /* Initialize the input set for filters (hash and fd) to default value */ i40e_filter_input_set_init(pf); - /* Initialize the parameters for adminq */ - i40e_init_adminq_parameter(hw); - ret = i40e_init_adminq(hw); - if (ret != I40E_SUCCESS) { - PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret); - return -EIO; - } - PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x", - hw->aq.fw_maj_ver, hw->aq.fw_min_ver, - hw->aq.api_maj_ver, hw->aq.api_min_ver, - ((hw->nvm.version >> 12) & 0xf), - ((hw->nvm.version >> 4) & 0xff), - (hw->nvm.version & 0xf), hw->nvm.eetrack); - /* initialise the L3_MAP register */ if (!pf->support_multi_driver) { ret = i40e_aq_debug_write_global_register(hw, @@ -1620,7 +1714,7 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev) if (ret) PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret); - if (hw->adapter_stopped == 0) + if (hw->adapter_closed == 0) i40e_dev_close(dev); dev->dev_ops = NULL; @@ -1644,9 +1738,6 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev) /* uninitialize pf host driver */ i40e_pf_host_uninit(dev); - rte_free(dev->data->mac_addrs); - dev->data->mac_addrs = NULL; - /* disable uio intr before callback unregister */ rte_intr_disable(intr_handle); @@ -1703,6 +1794,10 @@ i40e_dev_configure(struct rte_eth_dev *dev) ad->tx_simple_allowed = true; ad->tx_vec_allowed = true; + /* Only legacy filter API needs the following fdir config. So when the + * legacy filter API is deprecated, the following codes should also be + * removed. + */ if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) { ret = i40e_fdir_setup(pf); if (ret != I40E_SUCCESS) { @@ -1760,7 +1855,11 @@ err_dcb: rte_free(pf->vmdq); pf->vmdq = NULL; err: - /* need to release fdir resource if exists */ + /* Need to release fdir resource if exists. + * Only legacy filter API needs the following fdir config. So when the + * legacy filter API is deprecated, the following code should also be + * removed. 
+ */ i40e_fdir_teardown(pf); return ret; } @@ -1833,8 +1932,7 @@ __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect, /* Write first RX queue to Link list register as the head element */ if (vsi->type != I40E_VSI_SRIOV) { uint16_t interval = - i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL, 1, - pf->support_multi_driver); + i40e_calc_itr_interval(1, pf->support_multi_driver); if (msix_vect == I40E_MISC_VEC_ID) { I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0, @@ -2030,27 +2128,40 @@ i40e_phy_conf_link(struct i40e_hw *hw, struct i40e_aq_get_phy_abilities_resp phy_ab; struct i40e_aq_set_phy_config phy_conf; enum i40e_aq_phy_type cnt; + uint8_t avail_speed; uint32_t phy_type_mask = 0; const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX | I40E_AQ_PHY_FLAG_PAUSE_RX | I40E_AQ_PHY_FLAG_PAUSE_RX | I40E_AQ_PHY_FLAG_LOW_POWER; - const uint8_t advt = I40E_LINK_SPEED_40GB | - I40E_LINK_SPEED_25GB | - I40E_LINK_SPEED_10GB | - I40E_LINK_SPEED_1GB | - I40E_LINK_SPEED_100MB; int ret = -ENOTSUP; + /* To get phy capabilities of available speeds. */ + status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab, + NULL); + if (status) { + PMD_DRV_LOG(ERR, "Failed to get PHY capabilities: %d\n", + status); + return ret; + } + avail_speed = phy_ab.link_speed; + /* To get the current phy config. */ status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab, NULL); - if (status) + if (status) { + PMD_DRV_LOG(ERR, "Failed to get the current PHY config: %d\n", + status); return ret; + } - /* If link already up, no need to set up again */ - if (is_up && phy_ab.phy_type != 0) + /* If link needs to go up and it is in autoneg mode the speed is OK, + * no need to set up again. + */ + if (is_up && phy_ab.phy_type != 0 && + abilities & I40E_AQ_PHY_AN_ENABLED && + phy_ab.link_speed != 0) return I40E_SUCCESS; memset(&phy_conf, 0, sizeof(phy_conf)); @@ -2059,15 +2170,17 @@ i40e_phy_conf_link(struct i40e_hw *hw, abilities &= ~mask; abilities |= phy_ab.abilities & mask; - /* update ablities and speed */ - if (abilities & I40E_AQ_PHY_AN_ENABLED) - phy_conf.link_speed = advt; - else - phy_conf.link_speed = is_up ? force_speed : phy_ab.link_speed; - phy_conf.abilities = abilities; - + /* If link needs to go up, but the force speed is not supported, + * Warn users and config the default available speeds. + */ + if (is_up && !(force_speed & avail_speed)) { + PMD_DRV_LOG(WARNING, "Invalid speed setting, set to default!\n"); + phy_conf.link_speed = avail_speed; + } else { + phy_conf.link_speed = is_up ? 
force_speed : avail_speed; + } /* PHY type mask needs to include each type except PHY type extension */ for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_25GBASE_KR; cnt++) @@ -2103,11 +2216,18 @@ i40e_apply_link_speed(struct rte_eth_dev *dev) struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct rte_eth_conf *conf = &dev->data->dev_conf; + if (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) { + conf->link_speeds = ETH_LINK_SPEED_40G | + ETH_LINK_SPEED_25G | + ETH_LINK_SPEED_20G | + ETH_LINK_SPEED_10G | + ETH_LINK_SPEED_1G | + ETH_LINK_SPEED_100M; + } speed = i40e_parse_link_speeds(conf->link_speeds); - abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; - if (!(conf->link_speeds & ETH_LINK_SPEED_FIXED)) - abilities |= I40E_AQ_PHY_AN_ENABLED; - abilities |= I40E_AQ_PHY_LINK_ENABLED; + abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK | + I40E_AQ_PHY_AN_ENABLED | + I40E_AQ_PHY_LINK_ENABLED; return i40e_phy_conf_link(hw, abilities, speed, true); } @@ -2224,13 +2344,6 @@ i40e_dev_start(struct rte_eth_dev *dev) } /* Apply link configure */ - if (dev->data->dev_conf.link_speeds & ~(ETH_LINK_SPEED_100M | - ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G | - ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | - ETH_LINK_SPEED_40G)) { - PMD_DRV_LOG(ERR, "Invalid link setting"); - goto err_up; - } ret = i40e_apply_link_speed(dev); if (I40E_SUCCESS != ret) { PMD_DRV_LOG(ERR, "Fail to apply link setting"); @@ -2260,8 +2373,13 @@ i40e_dev_start(struct rte_eth_dev *dev) i40e_dev_link_update(dev, 0); } - /* enable uio intr after callback register */ - rte_intr_enable(intr_handle); + if (dev->data->dev_conf.intr_conf.rxq == 0) { + rte_eal_alarm_set(I40E_ALARM_INTERVAL, + i40e_dev_alarm_handler, dev); + } else { + /* enable uio intr after callback register */ + rte_intr_enable(intr_handle); + } i40e_filter_restore(pf); @@ -2291,6 +2409,12 @@ i40e_dev_stop(struct rte_eth_dev *dev) if (hw->adapter_stopped == 1) return; + + if (dev->data->dev_conf.intr_conf.rxq == 0) { + rte_eal_alarm_cancel(i40e_dev_alarm_handler, dev); + rte_intr_enable(intr_handle); + } + /* Disable all queues */ i40e_dev_switch_queues(pf, FALSE); @@ -2330,6 +2454,8 @@ i40e_dev_stop(struct rte_eth_dev *dev) pf->tm_conf.committed = false; hw->adapter_stopped = 1; + + pf->adapter->rss_reta_updated = 0; } static void @@ -2373,6 +2499,11 @@ i40e_dev_close(struct rte_eth_dev *dev) i40e_pf_disable_irq0(hw); rte_intr_disable(intr_handle); + /* + * Only legacy filter API needs the following fdir config. So when the + * legacy filter API is deprecated, the following code should also be + * removed. 
+ */ i40e_fdir_teardown(pf); /* shutdown and destroy the HMC */ @@ -2404,6 +2535,8 @@ i40e_dev_close(struct rte_eth_dev *dev) I40E_WRITE_REG(hw, I40E_PFGEN_CTRL, (reg | I40E_PFGEN_CTRL_PFSWR_MASK)); I40E_WRITE_FLUSH(hw); + + hw->adapter_closed = 1; } /* @@ -2465,6 +2598,10 @@ i40e_dev_promiscuous_disable(struct rte_eth_dev *dev) if (status != I40E_SUCCESS) PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous"); + /* must remain in all_multicast mode */ + if (dev->data->all_multicast == 1) + return; + status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, false, NULL); if (status != I40E_SUCCESS) @@ -3037,20 +3174,20 @@ i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */ + struct i40e_vsi *vsi; unsigned i; /* call read registers - updates values, now write them to struct */ i40e_read_stats_registers(pf, hw); - stats->ipackets = ns->eth.rx_unicast + - ns->eth.rx_multicast + - ns->eth.rx_broadcast - - ns->eth.rx_discards - + stats->ipackets = pf->main_vsi->eth_stats.rx_unicast + + pf->main_vsi->eth_stats.rx_multicast + + pf->main_vsi->eth_stats.rx_broadcast - pf->main_vsi->eth_stats.rx_discards; stats->opackets = ns->eth.tx_unicast + ns->eth.tx_multicast + ns->eth.tx_broadcast; - stats->ibytes = ns->eth.rx_bytes; + stats->ibytes = pf->main_vsi->eth_stats.rx_bytes; stats->obytes = ns->eth.tx_bytes; stats->oerrors = ns->eth.tx_errors + pf->main_vsi->eth_stats.tx_errors; @@ -3062,6 +3199,21 @@ i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) ns->rx_length_errors + ns->rx_undersize + ns->rx_oversize + ns->rx_fragments + ns->rx_jabber; + if (pf->vfs) { + for (i = 0; i < pf->vf_num; i++) { + vsi = pf->vfs[i].vsi; + i40e_update_vsi_stats(vsi); + + stats->ipackets += (vsi->eth_stats.rx_unicast + + vsi->eth_stats.rx_multicast + + vsi->eth_stats.rx_broadcast - + vsi->eth_stats.rx_discards); + stats->ibytes += vsi->eth_stats.rx_bytes; + stats->oerrors += vsi->eth_stats.tx_errors; + stats->imissed += vsi->eth_stats.rx_discards; + } + } + PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************"); PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", ns->eth.rx_bytes); PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", ns->eth.rx_unicast); @@ -3330,7 +3482,8 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | - DEV_RX_OFFLOAD_CRC_STRIP | + DEV_RX_OFFLOAD_KEEP_CRC | + DEV_RX_OFFLOAD_SCATTER | DEV_RX_OFFLOAD_VLAN_EXTEND | DEV_RX_OFFLOAD_VLAN_FILTER | DEV_RX_OFFLOAD_JUMBO_FRAME; @@ -3543,7 +3696,7 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev, if (vlan_type == ETH_VLAN_TYPE_OUTER) hw->second_tag = rte_cpu_to_le_16(tpid); } - ret = i40e_aq_set_switch_config(hw, 0, 0, NULL); + ret = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL); if (ret != I40E_SUCCESS) { PMD_DRV_LOG(ERR, "Set switch config failed aq_err: %d", @@ -4131,6 +4284,8 @@ i40e_dev_rss_reta_update(struct rte_eth_dev *dev, } ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size); + pf->adapter->rss_reta_updated = 1; + out: rte_free(lut); @@ -4335,7 +4490,6 @@ i40e_get_cap(struct i40e_hw *hw) } #define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF 4 -#define QUEUE_NUM_PER_VF_ARG "queue-num-per-vf" static int i40e_pf_parse_vf_queue_number_handler(const char *key, const char *value, @@ -4369,9 +4523,9 @@ static 
int i40e_pf_parse_vf_queue_number_handler(const char *key, static int i40e_pf_config_vf_rxq_number(struct rte_eth_dev *dev) { - static const char * const valid_keys[] = {QUEUE_NUM_PER_VF_ARG, NULL}; struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct rte_kvargs *kvlist; + int kvargs_count; /* set default queue number per VF as 4 */ pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF; @@ -4383,12 +4537,18 @@ static int i40e_pf_config_vf_rxq_number(struct rte_eth_dev *dev) if (kvlist == NULL) return -(EINVAL); - if (rte_kvargs_count(kvlist, QUEUE_NUM_PER_VF_ARG) > 1) + kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG); + if (!kvargs_count) { + rte_kvargs_free(kvlist); + return 0; + } + + if (kvargs_count > 1) PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only " "the first invalid or last valid one is used !", - QUEUE_NUM_PER_VF_ARG); + ETH_I40E_QUEUE_NUM_PER_VF_ARG); - rte_kvargs_process(kvlist, QUEUE_NUM_PER_VF_ARG, + rte_kvargs_process(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG, i40e_pf_parse_vf_queue_number_handler, pf); rte_kvargs_free(kvlist); @@ -5243,7 +5403,7 @@ i40e_enable_pf_lb(struct i40e_pf *pf) int ret; /* Use the FW API if FW >= v5.0 */ - if (hw->aq.fw_maj_ver < 5) { + if (hw->aq.fw_maj_ver < 5 && hw->mac.type != I40E_MAC_X722) { PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback"); return; } @@ -5514,7 +5674,7 @@ i40e_vsi_setup(struct i40e_pf *pf, ctxt.flags = I40E_AQ_VSI_TYPE_VF; /* Use the VEB configuration if FW >= v5.0 */ - if (hw->aq.fw_maj_ver >= 5) { + if (hw->aq.fw_maj_ver >= 5 || hw->mac.type == I40E_MAC_X722) { /* Configure switch ID */ ctxt.info.valid_sections |= rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID); @@ -6510,7 +6670,53 @@ i40e_dev_interrupt_handler(void *param) done: /* Enable interrupt */ i40e_pf_enable_irq0(hw); - rte_intr_enable(dev->intr_handle); +} + +static void +i40e_dev_alarm_handler(void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t icr0; + + /* Disable interrupt */ + i40e_pf_disable_irq0(hw); + + /* read out interrupt causes */ + icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0); + + /* No interrupt event indicated */ + if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) + goto done; + if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK) + PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error"); + if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) + PMD_DRV_LOG(ERR, "ICR0: malicious programming detected"); + if (icr0 & I40E_PFINT_ICR0_GRST_MASK) + PMD_DRV_LOG(INFO, "ICR0: global reset requested"); + if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) + PMD_DRV_LOG(INFO, "ICR0: PCI exception activated"); + if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK) + PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state"); + if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) + PMD_DRV_LOG(ERR, "ICR0: HMC error"); + if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK) + PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error"); + + if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) { + PMD_DRV_LOG(INFO, "ICR0: VF reset detected"); + i40e_dev_handle_vfr_event(dev); + } + if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) { + PMD_DRV_LOG(INFO, "ICR0: adminq event"); + i40e_dev_handle_aq_msg(dev); + } + +done: + /* Enable interrupt */ + i40e_pf_enable_irq0(hw); + rte_eal_alarm_set(I40E_ALARM_INTERVAL, + i40e_dev_alarm_handler, dev); } int @@ -7201,7 +7407,7 @@ i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len) int ret; if (!key || !key_len) - return -EINVAL; + return 0; 
if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) { ret = i40e_aq_get_rss_key(hw, vsi->vsi_id, @@ -7284,9 +7490,15 @@ i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev, struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint64_t hena; + int ret; + + if (!rss_conf) + return -EINVAL; - i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key, + ret = i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key, &rss_conf->rss_key_len); + if (ret) + return ret; hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)); hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32; @@ -7331,7 +7543,7 @@ i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag) /* Convert tunnel filter structure */ static int i40e_tunnel_filter_convert( - struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter, + struct i40e_aqc_cloud_filters_element_bb *cld_filter, struct i40e_tunnel_filter *tunnel_filter) { ether_addr_copy((struct ether_addr *)&cld_filter->element.outer_mac, @@ -7429,8 +7641,8 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf, int val, ret = 0; struct i40e_hw *hw = I40E_PF_TO_HW(pf); struct i40e_vsi *vsi = pf->main_vsi; - struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter; - struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter; + struct i40e_aqc_cloud_filters_element_bb *cld_filter; + struct i40e_aqc_cloud_filters_element_bb *pfilter; struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel; struct i40e_tunnel_filter *tunnel, *node; struct i40e_tunnel_filter check_filter; /* Check if filter exists */ @@ -7538,7 +7750,7 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf, if (ret < 0) rte_free(tunnel); } else { - ret = i40e_aq_remove_cloud_filters(hw, vsi->seid, + ret = i40e_aq_rem_cloud_filters(hw, vsi->seid, &cld_filter->element, 1); if (ret < 0) { PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter."); @@ -7871,8 +8083,8 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf, struct i40e_pf_vf *vf = NULL; struct i40e_hw *hw = I40E_PF_TO_HW(pf); struct i40e_vsi *vsi; - struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter; - struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter; + struct i40e_aqc_cloud_filters_element_bb *cld_filter; + struct i40e_aqc_cloud_filters_element_bb *pfilter; struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel; struct i40e_tunnel_filter *tunnel, *node; struct i40e_tunnel_filter check_filter; /* Check if filter exists */ @@ -8075,7 +8287,7 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf, if (add) { if (big_buffer) - ret = i40e_aq_add_cloud_filters_big_buffer(hw, + ret = i40e_aq_add_cloud_filters_bb(hw, vsi->seid, cld_filter, 1); else ret = i40e_aq_add_cloud_filters(hw, @@ -8098,11 +8310,11 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf, rte_free(tunnel); } else { if (big_buffer) - ret = i40e_aq_remove_cloud_filters_big_buffer( + ret = i40e_aq_rem_cloud_filters_bb( hw, vsi->seid, cld_filter, 1); else - ret = i40e_aq_remove_cloud_filters(hw, vsi->seid, - &cld_filter->element, 1); + ret = i40e_aq_rem_cloud_filters(hw, vsi->seid, + &cld_filter->element, 1); if (ret < 0) { PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter."); rte_free(cld_filter); @@ -8314,13 +8526,16 @@ i40e_pf_config_rss(struct i40e_pf *pf) return -ENOTSUP; } - for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) { - if (j == num) - j = 0; - lut = (lut << 8) | (j & ((0x1 << - hw->func_caps.rss_table_entry_width) - 1)); - if ((i & 3) == 3) - I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut); + if 
(pf->adapter->rss_reta_updated == 0) { + for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) { + if (j == num) + j = 0; + lut = (lut << 8) | (j & ((0x1 << + hw->func_caps.rss_table_entry_width) - 1)); + if ((i & 3) == 3) + I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), + rte_bswap32(lut)); + } } rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf; @@ -10014,6 +10229,60 @@ i40e_pctype_to_flowtype(const struct i40e_adapter *adapter, #define I40E_GL_SWR_PM_UP_THR_SF_VALUE 0x06060606 #define I40E_GL_SWR_PM_UP_THR 0x269FBC +/* + * GL_SWR_PM_UP_THR: + * The value is not impacted from the link speed, its value is set according + * to the total number of ports for a better pipe-monitor configuration. + */ +static bool +i40e_get_swr_pm_cfg(struct i40e_hw *hw, uint32_t *value) +{ +#define I40E_GL_SWR_PM_EF_DEVICE(dev) \ + .device_id = (dev), \ + .val = I40E_GL_SWR_PM_UP_THR_EF_VALUE + +#define I40E_GL_SWR_PM_SF_DEVICE(dev) \ + .device_id = (dev), \ + .val = I40E_GL_SWR_PM_UP_THR_SF_VALUE + + static const struct { + uint16_t device_id; + uint32_t val; + } swr_pm_table[] = { + { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_XL710) }, + { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_KX_C) }, + { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T) }, + { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T4) }, + + { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_KX_B) }, + { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_A) }, + { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_B) }, + { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2) }, + { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2_A) }, + { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_B) }, + { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_SFP28) }, + }; + uint32_t i; + + if (value == NULL) { + PMD_DRV_LOG(ERR, "value is NULL"); + return false; + } + + for (i = 0; i < RTE_DIM(swr_pm_table); i++) { + if (hw->device_id == swr_pm_table[i].device_id) { + *value = swr_pm_table[i].val; + + PMD_DRV_LOG(DEBUG, "Device 0x%x with GL_SWR_PM_UP_THR " + "value - 0x%08x", + hw->device_id, *value); + return true; + } + } + + return false; +} + static int i40e_dev_sync_phy_type(struct i40e_hw *hw) { @@ -10078,13 +10347,16 @@ i40e_configure_registers(struct i40e_hw *hw) } if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) { - if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types) || /* For XL710 */ - I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) /* For XXV710 */ - reg_table[i].val = - I40E_GL_SWR_PM_UP_THR_SF_VALUE; - else /* For X710 */ - reg_table[i].val = - I40E_GL_SWR_PM_UP_THR_EF_VALUE; + uint32_t cfg_val; + + if (!i40e_get_swr_pm_cfg(hw, &cfg_val)) { + PMD_DRV_LOG(DEBUG, "Device 0x%x skips " + "GL_SWR_PM_UP_THR value fixup", + hw->device_id); + continue; + } + + reg_table[i].val = cfg_val; } ret = i40e_aq_debug_read_register(hw, reg_table[i].addr, @@ -11153,6 +11425,16 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb) * LLDP MIB change event. */ if (sw_dcb == TRUE) { + /* When using NVM 6.01 or later, the RX data path does + * not hang if the FW LLDP is stopped. + */ + if (((hw->nvm.version >> 12) & 0xf) >= 6 && + ((hw->nvm.version >> 4) & 0xff) >= 1) { + ret = i40e_aq_stop_lldp(hw, TRUE, NULL); + if (ret != I40E_SUCCESS) + PMD_INIT_LOG(DEBUG, "Failed to stop lldp"); + } + ret = i40e_init_dcb(hw); /* If lldp agent is stopped, the return value from * i40e_init_dcb we expect is failure with I40E_AQ_RC_EPERM @@ -11367,6 +11649,32 @@ i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) return 0; } +/** + * This function is used to check if the register is valid. 
+ * Below is the valid registers list for X722 only: + * 0x2b800--0x2bb00 + * 0x38700--0x38a00 + * 0x3d800--0x3db00 + * 0x208e00--0x209000 + * 0x20be00--0x20c000 + * 0x263c00--0x264000 + * 0x265c00--0x266000 + */ +static inline int i40e_valid_regs(enum i40e_mac_type type, uint32_t reg_offset) +{ + if ((type != I40E_MAC_X722) && + ((reg_offset >= 0x2b800 && reg_offset <= 0x2bb00) || + (reg_offset >= 0x38700 && reg_offset <= 0x38a00) || + (reg_offset >= 0x3d800 && reg_offset <= 0x3db00) || + (reg_offset >= 0x208e00 && reg_offset <= 0x209000) || + (reg_offset >= 0x20be00 && reg_offset <= 0x20c000) || + (reg_offset >= 0x263c00 && reg_offset <= 0x264000) || + (reg_offset >= 0x265c00 && reg_offset <= 0x266000))) + return 0; + else + return 1; +} + static int i40e_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs) { @@ -11408,8 +11716,11 @@ static int i40e_get_regs(struct rte_eth_dev *dev, reg_offset = arr_idx * reg_info->stride1 + arr_idx2 * reg_info->stride2; reg_offset += reg_info->base_addr; - ptr_data[reg_offset >> 2] = - I40E_READ_REG(hw, reg_offset); + if (!i40e_valid_regs(hw->mac.type, reg_offset)) + ptr_data[reg_offset >> 2] = 0; + else + ptr_data[reg_offset >> 2] = + I40E_READ_REG(hw, reg_offset); } } @@ -11488,7 +11799,7 @@ static int i40e_get_module_info(struct rte_eth_dev *dev, case I40E_MODULE_TYPE_SFP: status = i40e_aq_get_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE, - I40E_I2C_EEPROM_DEV_ADDR, + I40E_I2C_EEPROM_DEV_ADDR, 1, I40E_MODULE_SFF_8472_COMP, &sff8472_comp, NULL); if (status) @@ -11496,7 +11807,7 @@ static int i40e_get_module_info(struct rte_eth_dev *dev, status = i40e_aq_get_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE, - I40E_I2C_EEPROM_DEV_ADDR, + I40E_I2C_EEPROM_DEV_ADDR, 1, I40E_MODULE_SFF_8472_SWAP, &sff8472_swap, NULL); if (status) @@ -11524,7 +11835,7 @@ static int i40e_get_module_info(struct rte_eth_dev *dev, /* Read from memory page 0. 
*/ status = i40e_aq_get_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE, - 0, + 0, 1, I40E_MODULE_REVISION_ADDR, &sff8636_rev, NULL); if (status) @@ -11585,7 +11896,7 @@ static int i40e_get_module_eeprom(struct rte_eth_dev *dev, } status = i40e_aq_get_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE, - addr, offset, &value, NULL); + addr, offset, 1, &value, NULL); if (status) return -EIO; data[i] = (uint8_t)value; @@ -11716,7 +12027,7 @@ i40e_tunnel_filter_restore(struct i40e_pf *pf) struct i40e_tunnel_filter_list *tunnel_list = &pf->tunnel.tunnel_list; struct i40e_tunnel_filter *f; - struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter; + struct i40e_aqc_cloud_filters_element_bb cld_filter; bool big_buffer = 0; TAILQ_FOREACH(f, tunnel_list, rules) { @@ -11751,8 +12062,8 @@ i40e_tunnel_filter_restore(struct i40e_pf *pf) big_buffer = 1; if (big_buffer) - i40e_aq_add_cloud_filters_big_buffer(hw, - vsi->seid, &cld_filter, 1); + i40e_aq_add_cloud_filters_bb(hw, + vsi->seid, &cld_filter, 1); else i40e_aq_add_cloud_filters(hw, vsi->seid, &cld_filter.element, 1); @@ -12081,7 +12392,8 @@ i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg, ptype_mapping[i].sw_ptype |= RTE_PTYPE_TUNNEL_GRENAT; in_tunnel = true; - } else if (!strncasecmp(name, "L2TPV2CTL", 9)) { + } else if (!strncasecmp(name, "L2TPV2CTL", 9) || + !strncasecmp(name, "L2TPV2", 6)) { ptype_mapping[i].sw_ptype |= RTE_PTYPE_TUNNEL_L2TP; in_tunnel = true; @@ -12309,16 +12621,19 @@ i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out, if (in->key_len > RTE_DIM(out->key) || in->queue_num > RTE_DIM(out->queue)) return -EINVAL; + if (!in->key && in->key_len) + return -EINVAL; out->conf = (struct rte_flow_action_rss){ .func = in->func, .level = in->level, .types = in->types, .key_len = in->key_len, .queue_num = in->queue_num, - .key = memcpy(out->key, in->key, in->key_len), .queue = memcpy(out->queue, in->queue, sizeof(*in->queue) * in->queue_num), }; + if (in->key) + out->conf.key = memcpy(out->key, in->key, in->key_len); return 0; } @@ -12416,9 +12731,7 @@ i40e_config_rss_filter(struct i40e_pf *pf, return 0; } -RTE_INIT(i40e_init_log); -static void -i40e_init_log(void) +RTE_INIT(i40e_init_log) { i40e_logtype_init = rte_log_register("pmd.net.i40e.init"); if (i40e_logtype_init >= 0) @@ -12429,5 +12742,8 @@ i40e_init_log(void) } RTE_PMD_REGISTER_PARAM_STRING(net_i40e, - QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16" - ETH_I40E_SUPPORT_MULTI_DRIVER "=1"); + ETH_I40E_FLOATING_VEB_ARG "=1" + ETH_I40E_FLOATING_VEB_LIST_ARG "=" + ETH_I40E_QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16" + ETH_I40E_SUPPORT_MULTI_DRIVER "=1" + ETH_I40E_USE_LATEST_VEC "=0|1");
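
For reference (not part of the diff above): a minimal, self-contained sketch of the devargs-parsing pattern this patch consolidates in the i40e PMD — one shared valid_keys[] list passed to rte_kvargs_parse(), an early return when rte_kvargs_count() finds the key absent, and a per-key handler dispatched through rte_kvargs_process(). The EXAMPLE_USE_LATEST_VEC macro and the example_* names are hypothetical illustrations, not symbols from the driver; the rte_kvargs calls themselves are the standard DPDK API used in the patch.

/*
 * Illustrative sketch, assuming only the public rte_kvargs API.
 * Parses a devargs string such as "use-latest-supported-vec=1".
 */
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>

#include <rte_common.h>
#include <rte_kvargs.h>

#define EXAMPLE_USE_LATEST_VEC "use-latest-supported-vec"

static const char *const example_valid_keys[] = {
	EXAMPLE_USE_LATEST_VEC,
	NULL,
};

/* Handler signature required by rte_kvargs_process(). */
static int
example_parse_latest_vec(const char *key __rte_unused, const char *value,
			 void *opaque)
{
	bool *use_latest_vec = opaque;

	*use_latest_vec = atoi(value) != 0;
	return 0;
}

static int
example_parse_devargs(const char *args, bool *use_latest_vec)
{
	struct rte_kvargs *kvlist;

	*use_latest_vec = false;

	/* Unknown keys in 'args' make the parse fail. */
	kvlist = rte_kvargs_parse(args, example_valid_keys);
	if (kvlist == NULL)
		return -EINVAL;

	/* Key not supplied: keep the default instead of failing. */
	if (rte_kvargs_count(kvlist, EXAMPLE_USE_LATEST_VEC) == 0) {
		rte_kvargs_free(kvlist);
		return 0;
	}

	if (rte_kvargs_process(kvlist, EXAMPLE_USE_LATEST_VEC,
			       example_parse_latest_vec, use_latest_vec) < 0) {
		rte_kvargs_free(kvlist);
		return -EINVAL;
	}

	rte_kvargs_free(kvlist);
	return 0;
}

A caller would typically feed it dev->device->devargs->args, mirroring i40e_use_latest_vec() in the patch; on the command line the same option is given per device, e.g. -w 0000:81:00.0,use-latest-supported-vec=1 (device address is an example).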