X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fi40e%2Fi40e_ethdev.c;h=8191a6a736ffce1d1d29a40cc67191b16d9d20b1;hb=646d3f20aa5140ae6152e3b30e59fae9dde3d6a8;hp=1847f228226b17ef066d26d6f47f8ed5eb17cb6b;hpb=55c7fbe42d35c07d7cbbbcd8bff2e985c537d867;p=dpdk.git diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c index 1847f22822..8191a6a736 100644 --- a/drivers/net/i40e/i40e_ethdev.c +++ b/drivers/net/i40e/i40e_ethdev.c @@ -391,7 +391,7 @@ static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf, struct i40e_ethertype_filter *filter); static int i40e_tunnel_filter_convert( - struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter, + struct i40e_aqc_cloud_filters_element_bb *cld_filter, struct i40e_tunnel_filter *tunnel_filter); static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf, struct i40e_tunnel_filter *tunnel_filter); @@ -1273,7 +1273,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct i40e_vsi *vsi; int ret; - uint32_t len; + uint32_t len, val; uint8_t aq_fail = 0; PMD_INIT_FUNC_TRACE(); @@ -1316,6 +1316,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) hw->bus.device = pci_dev->addr.devid; hw->bus.func = pci_dev->addr.function; hw->adapter_stopped = 0; + hw->adapter_closed = 0; /* * Switch Tag value should not be identical to either the First Tag @@ -1324,6 +1325,15 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) */ hw->switch_tag = 0xffff; + val = I40E_READ_REG(hw, I40E_GL_FWSTS); + if (val & I40E_GL_FWSTS_FWS1B_MASK) { + PMD_INIT_LOG(ERR, "\nERROR: " + "Firmware recovery mode detected. Limiting functionality.\n" + "Refer to the Intel(R) Ethernet Adapters and Devices " + "User Guide for details on firmware recovery mode."); + return -EIO; + } + /* Check if need to support multi-driver */ i40e_support_multi_driver(dev); /* Check if users want the latest supported vec path */ @@ -1332,9 +1342,6 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) /* Make sure all is clean before doing PF reset */ i40e_clear_hw(hw); - /* Initialize the hardware */ - i40e_hw_init(dev); - /* Reset here to make sure all is clean for each PF */ ret = i40e_pf_reset(hw); if (ret) { @@ -1349,6 +1356,23 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) return ret; } + /* Initialize the parameters for adminq */ + i40e_init_adminq_parameter(hw); + ret = i40e_init_adminq(hw); + if (ret != I40E_SUCCESS) { + PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret); + return -EIO; + } + PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x", + hw->aq.fw_maj_ver, hw->aq.fw_min_ver, + hw->aq.api_maj_ver, hw->aq.api_min_ver, + ((hw->nvm.version >> 12) & 0xf), + ((hw->nvm.version >> 4) & 0xff), + (hw->nvm.version & 0xf), hw->nvm.eetrack); + + /* Initialize the hardware */ + i40e_hw_init(dev); + i40e_config_automask(pf); i40e_set_default_pctype_table(dev); @@ -1364,20 +1388,6 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) /* Initialize the input set for filters (hash and fd) to default value */ i40e_filter_input_set_init(pf); - /* Initialize the parameters for adminq */ - i40e_init_adminq_parameter(hw); - ret = i40e_init_adminq(hw); - if (ret != I40E_SUCCESS) { - PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret); - return -EIO; - } - PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x", - 
hw->aq.fw_maj_ver, hw->aq.fw_min_ver, - hw->aq.api_maj_ver, hw->aq.api_min_ver, - ((hw->nvm.version >> 12) & 0xf), - ((hw->nvm.version >> 4) & 0xff), - (hw->nvm.version & 0xf), hw->nvm.eetrack); - /* initialise the L3_MAP register */ if (!pf->support_multi_driver) { ret = i40e_aq_debug_write_global_register(hw, @@ -1483,9 +1493,6 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) goto err_setup_pf_switch; } - /* reset all stats of the device, including pf and main vsi */ - i40e_dev_stats_reset(dev); - vsi = pf->main_vsi; /* Disable double vlan by default */ @@ -1580,6 +1587,9 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) memset(&pf->rss_info, 0, sizeof(struct i40e_rte_flow_rss_conf)); + /* reset all stats of the device, including pf and main vsi */ + i40e_dev_stats_reset(dev); + return 0; err_init_fdir_filter_list: @@ -1704,7 +1714,7 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev) if (ret) PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret); - if (hw->adapter_stopped == 0) + if (hw->adapter_closed == 0) i40e_dev_close(dev); dev->dev_ops = NULL; @@ -1728,9 +1738,6 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev) /* uninitialize pf host driver */ i40e_pf_host_uninit(dev); - rte_free(dev->data->mac_addrs); - dev->data->mac_addrs = NULL; - /* disable uio intr before callback unregister */ rte_intr_disable(intr_handle); @@ -1787,6 +1794,10 @@ i40e_dev_configure(struct rte_eth_dev *dev) ad->tx_simple_allowed = true; ad->tx_vec_allowed = true; + /* Only legacy filter API needs the following fdir config. So when the + * legacy filter API is deprecated, the following codes should also be + * removed. + */ if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) { ret = i40e_fdir_setup(pf); if (ret != I40E_SUCCESS) { @@ -1844,7 +1855,11 @@ err_dcb: rte_free(pf->vmdq); pf->vmdq = NULL; err: - /* need to release fdir resource if exists */ + /* Need to release fdir resource if exists. + * Only legacy filter API needs the following fdir config. So when the + * legacy filter API is deprecated, the following code should also be + * removed. + */ i40e_fdir_teardown(pf); return ret; } @@ -2439,6 +2454,8 @@ i40e_dev_stop(struct rte_eth_dev *dev) pf->tm_conf.committed = false; hw->adapter_stopped = 1; + + pf->adapter->rss_reta_updated = 0; } static void @@ -2482,6 +2499,11 @@ i40e_dev_close(struct rte_eth_dev *dev) i40e_pf_disable_irq0(hw); rte_intr_disable(intr_handle); + /* + * Only legacy filter API needs the following fdir config. So when the + * legacy filter API is deprecated, the following code should also be + * removed. 
+ */ i40e_fdir_teardown(pf); /* shutdown and destroy the HMC */ @@ -2513,6 +2535,8 @@ i40e_dev_close(struct rte_eth_dev *dev) I40E_WRITE_REG(hw, I40E_PFGEN_CTRL, (reg | I40E_PFGEN_CTRL_PFSWR_MASK)); I40E_WRITE_FLUSH(hw); + + hw->adapter_closed = 1; } /* @@ -2574,6 +2598,10 @@ i40e_dev_promiscuous_disable(struct rte_eth_dev *dev) if (status != I40E_SUCCESS) PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous"); + /* must remain in all_multicast mode */ + if (dev->data->all_multicast == 1) + return; + status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, false, NULL); if (status != I40E_SUCCESS) @@ -3146,20 +3174,20 @@ i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */ + struct i40e_vsi *vsi; unsigned i; /* call read registers - updates values, now write them to struct */ i40e_read_stats_registers(pf, hw); - stats->ipackets = ns->eth.rx_unicast + - ns->eth.rx_multicast + - ns->eth.rx_broadcast - - ns->eth.rx_discards - + stats->ipackets = pf->main_vsi->eth_stats.rx_unicast + + pf->main_vsi->eth_stats.rx_multicast + + pf->main_vsi->eth_stats.rx_broadcast - pf->main_vsi->eth_stats.rx_discards; stats->opackets = ns->eth.tx_unicast + ns->eth.tx_multicast + ns->eth.tx_broadcast; - stats->ibytes = ns->eth.rx_bytes; + stats->ibytes = pf->main_vsi->eth_stats.rx_bytes; stats->obytes = ns->eth.tx_bytes; stats->oerrors = ns->eth.tx_errors + pf->main_vsi->eth_stats.tx_errors; @@ -3171,6 +3199,21 @@ i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) ns->rx_length_errors + ns->rx_undersize + ns->rx_oversize + ns->rx_fragments + ns->rx_jabber; + if (pf->vfs) { + for (i = 0; i < pf->vf_num; i++) { + vsi = pf->vfs[i].vsi; + i40e_update_vsi_stats(vsi); + + stats->ipackets += (vsi->eth_stats.rx_unicast + + vsi->eth_stats.rx_multicast + + vsi->eth_stats.rx_broadcast - + vsi->eth_stats.rx_discards); + stats->ibytes += vsi->eth_stats.rx_bytes; + stats->oerrors += vsi->eth_stats.tx_errors; + stats->imissed += vsi->eth_stats.rx_discards; + } + } + PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************"); PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", ns->eth.rx_bytes); PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", ns->eth.rx_unicast); @@ -3417,6 +3460,31 @@ i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) return 0; } +/* + * When using NVM 6.01(for X710 XL710 XXV710)/3.33(for X722) or later, + * the Rx data path does not hang if the FW LLDP is stopped. + * return true if lldp need to stop + * return false if we cannot disable the LLDP to avoid Rx data path blocking. 
+ */ +static bool +i40e_need_stop_lldp(struct rte_eth_dev *dev) +{ + double nvm_ver; + char ver_str[64] = {0}; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + i40e_fw_version_get(dev, ver_str, 64); + nvm_ver = atof(ver_str); + if ((hw->mac.type == I40E_MAC_X722 || + hw->mac.type == I40E_MAC_X722_VF) && + ((uint32_t)(nvm_ver * 1000) >= (uint32_t)(3.33 * 1000))) + return true; + else if ((uint32_t)(nvm_ver * 1000) >= (uint32_t)(6.01 * 1000)) + return true; + + return false; +} + static void i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { @@ -3440,6 +3508,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_RX_OFFLOAD_KEEP_CRC | + DEV_RX_OFFLOAD_SCATTER | DEV_RX_OFFLOAD_VLAN_EXTEND | DEV_RX_OFFLOAD_VLAN_FILTER | DEV_RX_OFFLOAD_JUMBO_FRAME; @@ -3652,7 +3721,7 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev, if (vlan_type == ETH_VLAN_TYPE_OUTER) hw->second_tag = rte_cpu_to_le_16(tpid); } - ret = i40e_aq_set_switch_config(hw, 0, 0, NULL); + ret = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL); if (ret != I40E_SUCCESS) { PMD_DRV_LOG(ERR, "Set switch config failed aq_err: %d", @@ -4139,7 +4208,8 @@ i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size) return -EINVAL; if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) { - ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id, TRUE, + ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id, + vsi->type != I40E_VSI_SRIOV, lut, lut_size); if (ret) { PMD_DRV_LOG(ERR, "Failed to get RSS lookup table"); @@ -4178,7 +4248,8 @@ i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size) hw = I40E_VSI_TO_HW(vsi); if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) { - ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, TRUE, + ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, + vsi->type != I40E_VSI_SRIOV, lut, lut_size); if (ret) { PMD_DRV_LOG(ERR, "Failed to set RSS lookup table"); @@ -4240,6 +4311,8 @@ i40e_dev_rss_reta_update(struct rte_eth_dev *dev, } ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size); + pf->adapter->rss_reta_updated = 1; + out: rte_free(lut); @@ -5357,7 +5430,7 @@ i40e_enable_pf_lb(struct i40e_pf *pf) int ret; /* Use the FW API if FW >= v5.0 */ - if (hw->aq.fw_maj_ver < 5) { + if (hw->aq.fw_maj_ver < 5 && hw->mac.type != I40E_MAC_X722) { PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback"); return; } @@ -5628,7 +5701,7 @@ i40e_vsi_setup(struct i40e_pf *pf, ctxt.flags = I40E_AQ_VSI_TYPE_VF; /* Use the VEB configuration if FW >= v5.0 */ - if (hw->aq.fw_maj_ver >= 5) { + if (hw->aq.fw_maj_ver >= 5 || hw->mac.type == I40E_MAC_X722) { /* Configure switch ID */ ctxt.info.valid_sections |= rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID); @@ -6624,7 +6697,6 @@ i40e_dev_interrupt_handler(void *param) done: /* Enable interrupt */ i40e_pf_enable_irq0(hw); - rte_intr_enable(dev->intr_handle); } static void @@ -7362,7 +7434,7 @@ i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len) int ret; if (!key || !key_len) - return -EINVAL; + return 0; if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) { ret = i40e_aq_get_rss_key(hw, vsi->vsi_id, @@ -7445,9 +7517,15 @@ i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev, struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint64_t hena; + int ret; + + if (!rss_conf) + return -EINVAL; - i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key, + ret = i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key, 
&rss_conf->rss_key_len); + if (ret) + return ret; hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)); hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32; @@ -7492,7 +7570,7 @@ i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag) /* Convert tunnel filter structure */ static int i40e_tunnel_filter_convert( - struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter, + struct i40e_aqc_cloud_filters_element_bb *cld_filter, struct i40e_tunnel_filter *tunnel_filter) { ether_addr_copy((struct ether_addr *)&cld_filter->element.outer_mac, @@ -7590,8 +7668,8 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf, int val, ret = 0; struct i40e_hw *hw = I40E_PF_TO_HW(pf); struct i40e_vsi *vsi = pf->main_vsi; - struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter; - struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter; + struct i40e_aqc_cloud_filters_element_bb *cld_filter; + struct i40e_aqc_cloud_filters_element_bb *pfilter; struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel; struct i40e_tunnel_filter *tunnel, *node; struct i40e_tunnel_filter check_filter; /* Check if filter exists */ @@ -7699,7 +7777,7 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf, if (ret < 0) rte_free(tunnel); } else { - ret = i40e_aq_remove_cloud_filters(hw, vsi->seid, + ret = i40e_aq_rem_cloud_filters(hw, vsi->seid, &cld_filter->element, 1); if (ret < 0) { PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter."); @@ -8032,8 +8110,8 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf, struct i40e_pf_vf *vf = NULL; struct i40e_hw *hw = I40E_PF_TO_HW(pf); struct i40e_vsi *vsi; - struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter; - struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter; + struct i40e_aqc_cloud_filters_element_bb *cld_filter; + struct i40e_aqc_cloud_filters_element_bb *pfilter; struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel; struct i40e_tunnel_filter *tunnel, *node; struct i40e_tunnel_filter check_filter; /* Check if filter exists */ @@ -8236,7 +8314,7 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf, if (add) { if (big_buffer) - ret = i40e_aq_add_cloud_filters_big_buffer(hw, + ret = i40e_aq_add_cloud_filters_bb(hw, vsi->seid, cld_filter, 1); else ret = i40e_aq_add_cloud_filters(hw, @@ -8259,11 +8337,11 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf, rte_free(tunnel); } else { if (big_buffer) - ret = i40e_aq_remove_cloud_filters_big_buffer( + ret = i40e_aq_rem_cloud_filters_bb( hw, vsi->seid, cld_filter, 1); else - ret = i40e_aq_remove_cloud_filters(hw, vsi->seid, - &cld_filter->element, 1); + ret = i40e_aq_rem_cloud_filters(hw, vsi->seid, + &cld_filter->element, 1); if (ret < 0) { PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter."); rte_free(cld_filter); @@ -8475,13 +8553,16 @@ i40e_pf_config_rss(struct i40e_pf *pf) return -ENOTSUP; } - for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) { - if (j == num) - j = 0; - lut = (lut << 8) | (j & ((0x1 << - hw->func_caps.rss_table_entry_width) - 1)); - if ((i & 3) == 3) - I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut); + if (pf->adapter->rss_reta_updated == 0) { + for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) { + if (j == num) + j = 0; + lut = (lut << 8) | (j & ((0x1 << + hw->func_caps.rss_table_entry_width) - 1)); + if ((i & 3) == 3) + I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), + rte_bswap32(lut)); + } } rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf; @@ -10749,6 +10830,7 @@ i40e_start_timecounters(struct rte_eth_dev *dev) switch (link.link_speed) { case ETH_SPEED_NUM_40G: + 
case ETH_SPEED_NUM_25G: tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF; tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32; break; @@ -11371,11 +11453,7 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb) * LLDP MIB change event. */ if (sw_dcb == TRUE) { - /* When using NVM 6.01 or later, the RX data path does - * not hang if the FW LLDP is stopped. - */ - if (((hw->nvm.version >> 12) & 0xf) >= 6 && - ((hw->nvm.version >> 4) & 0xff) >= 1) { + if (i40e_need_stop_lldp(dev)) { ret = i40e_aq_stop_lldp(hw, TRUE, NULL); if (ret != I40E_SUCCESS) PMD_INIT_LOG(DEBUG, "Failed to stop lldp"); @@ -11595,6 +11673,32 @@ i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) return 0; } +/** + * This function is used to check if the register is valid. + * Below is the valid registers list for X722 only: + * 0x2b800--0x2bb00 + * 0x38700--0x38a00 + * 0x3d800--0x3db00 + * 0x208e00--0x209000 + * 0x20be00--0x20c000 + * 0x263c00--0x264000 + * 0x265c00--0x266000 + */ +static inline int i40e_valid_regs(enum i40e_mac_type type, uint32_t reg_offset) +{ + if ((type != I40E_MAC_X722) && + ((reg_offset >= 0x2b800 && reg_offset <= 0x2bb00) || + (reg_offset >= 0x38700 && reg_offset <= 0x38a00) || + (reg_offset >= 0x3d800 && reg_offset <= 0x3db00) || + (reg_offset >= 0x208e00 && reg_offset <= 0x209000) || + (reg_offset >= 0x20be00 && reg_offset <= 0x20c000) || + (reg_offset >= 0x263c00 && reg_offset <= 0x264000) || + (reg_offset >= 0x265c00 && reg_offset <= 0x266000))) + return 0; + else + return 1; +} + static int i40e_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs) { @@ -11636,8 +11740,11 @@ static int i40e_get_regs(struct rte_eth_dev *dev, reg_offset = arr_idx * reg_info->stride1 + arr_idx2 * reg_info->stride2; reg_offset += reg_info->base_addr; - ptr_data[reg_offset >> 2] = - I40E_READ_REG(hw, reg_offset); + if (!i40e_valid_regs(hw->mac.type, reg_offset)) + ptr_data[reg_offset >> 2] = 0; + else + ptr_data[reg_offset >> 2] = + I40E_READ_REG(hw, reg_offset); } } @@ -11716,7 +11823,7 @@ static int i40e_get_module_info(struct rte_eth_dev *dev, case I40E_MODULE_TYPE_SFP: status = i40e_aq_get_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE, - I40E_I2C_EEPROM_DEV_ADDR, + I40E_I2C_EEPROM_DEV_ADDR, 1, I40E_MODULE_SFF_8472_COMP, &sff8472_comp, NULL); if (status) @@ -11724,7 +11831,7 @@ static int i40e_get_module_info(struct rte_eth_dev *dev, status = i40e_aq_get_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE, - I40E_I2C_EEPROM_DEV_ADDR, + I40E_I2C_EEPROM_DEV_ADDR, 1, I40E_MODULE_SFF_8472_SWAP, &sff8472_swap, NULL); if (status) @@ -11752,7 +11859,7 @@ static int i40e_get_module_info(struct rte_eth_dev *dev, /* Read from memory page 0. 
*/ status = i40e_aq_get_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE, - 0, + 0, 1, I40E_MODULE_REVISION_ADDR, &sff8636_rev, NULL); if (status) @@ -11813,7 +11920,7 @@ static int i40e_get_module_eeprom(struct rte_eth_dev *dev, } status = i40e_aq_get_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE, - addr, offset, &value, NULL); + addr, offset, 1, &value, NULL); if (status) return -EIO; data[i] = (uint8_t)value; @@ -11944,7 +12051,7 @@ i40e_tunnel_filter_restore(struct i40e_pf *pf) struct i40e_tunnel_filter_list *tunnel_list = &pf->tunnel.tunnel_list; struct i40e_tunnel_filter *f; - struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter; + struct i40e_aqc_cloud_filters_element_bb cld_filter; bool big_buffer = 0; TAILQ_FOREACH(f, tunnel_list, rules) { @@ -11979,8 +12086,8 @@ i40e_tunnel_filter_restore(struct i40e_pf *pf) big_buffer = 1; if (big_buffer) - i40e_aq_add_cloud_filters_big_buffer(hw, - vsi->seid, &cld_filter, 1); + i40e_aq_add_cloud_filters_bb(hw, + vsi->seid, &cld_filter, 1); else i40e_aq_add_cloud_filters(hw, vsi->seid, &cld_filter.element, 1); @@ -12538,16 +12645,19 @@ i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out, if (in->key_len > RTE_DIM(out->key) || in->queue_num > RTE_DIM(out->queue)) return -EINVAL; + if (!in->key && in->key_len) + return -EINVAL; out->conf = (struct rte_flow_action_rss){ .func = in->func, .level = in->level, .types = in->types, .key_len = in->key_len, .queue_num = in->queue_num, - .key = memcpy(out->key, in->key, in->key_len), .queue = memcpy(out->queue, in->queue, sizeof(*in->queue) * in->queue_num), }; + if (in->key) + out->conf.key = memcpy(out->key, in->key, in->key_len); return 0; }
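
Note on the final hunk: i40e_rss_conf_init() now accepts a NULL RSS key as long as key_len is 0, and only copies a key into the driver's config when one is actually supplied. Below is a minimal sketch, assuming the rte_flow API of this DPDK generation, of how an application might hand such a keyless RSS action to the driver. The helper name setup_rss_flow, the ETH/IPv4 pattern and the queue array are illustrative assumptions, not taken from the patch.

#include <rte_ethdev.h>
#include <rte_flow.h>

/* Illustrative helper (not from the patch): create an RSS flow rule that
 * spreads IPv4/TCP traffic over the given queues without supplying an RSS
 * key. With the change to i40e_rss_conf_init() in the last hunk, key == NULL
 * together with key_len == 0 is accepted and no key is copied.
 */
static struct rte_flow *
setup_rss_flow(uint16_t port_id, const uint16_t *queues, uint32_t nb_queues,
	       struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_rss rss = {
		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
		.level = 0,
		.types = ETH_RSS_NONFRAG_IPV4_TCP,
		.key = NULL,		/* no key supplied: key_len stays 0 */
		.key_len = 0,
		.queue = queues,
		.queue_num = nb_queues,
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}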