X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fi40e%2Fi40e_ethdev.c;h=180ac7449702d0c135a077344ac433408cd47b98;hb=caccf8b318cafcdafe39faa3c5ce3eef67007621;hp=2f1cd851eece785ec4200224f3af80c579b1218a;hpb=98abce237ba75e06e30f92bf361bc0de22098cb4;p=dpdk.git

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 2f1cd851ee..180ac74497 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -1,34 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2017 Intel Corporation */ #include @@ -43,8 +14,9 @@ #include #include #include +#include #include -#include +#include #include #include #include @@ -65,6 +37,7 @@ #include "i40e_rxtx.h" #include "i40e_pf.h" #include "i40e_regs.h" +#include "rte_pmd_i40e.h" #define ETH_I40E_FLOATING_VEB_ARG "enable_floating_veb" #define ETH_I40E_FLOATING_VEB_LIST_ARG "floating_veb_list" @@ -74,7 +47,7 @@ /* Maximun number of capability elements */ #define I40E_MAX_CAP_ELE_NUM 128 -/* Wait count and inteval */ +/* Wait count and interval */ #define I40E_CHK_Q_ENA_COUNT 1000 #define I40E_CHK_Q_ENA_INTERVAL_US 1000 @@ -86,12 +59,6 @@ /* Flow control default timer */ #define I40E_DEFAULT_PAUSE_TIME 0xFFFFU -/* Flow control default high water */ -#define I40E_DEFAULT_HIGH_WATER (0x1C40/1024) - -/* Flow control default low water */ -#define I40E_DEFAULT_LOW_WATER (0x1A40/1024) - /* Flow control enable fwd bit */ #define I40E_PRTMAC_FWD_CTRL 0x00000001 @@ -101,6 +68,12 @@ /* Kilobytes shift */ #define I40E_KILOSHIFT 10 +/* Flow control default high water */ +#define I40E_DEFAULT_HIGH_WATER (0xF2000 >> I40E_KILOSHIFT) + +/* Flow control default low water */ +#define I40E_DEFAULT_LOW_WATER (0xF2000 >> I40E_KILOSHIFT) + /* Receive Average Packet Size in Byte*/ #define I40E_PACKET_AVERAGE_SIZE 128 @@ -137,10 +110,6 @@ #define I40E_PRTTSYN_TSYNTYPE 0x0e000000 #define I40E_CYCLECOUNTER_MASK 0xffffffffffffffffULL -#define I40E_MAX_PERCENT 100 -#define I40E_DEFAULT_DCB_APP_NUM 1 -#define I40E_DEFAULT_DCB_APP_PRIO 3 - /** * Below are values for writing un-exposed registers suggested * by silicon experts @@ -250,13 +219,14 @@ static int i40e_dev_configure(struct rte_eth_dev *dev); static int i40e_dev_start(struct rte_eth_dev *dev); static void i40e_dev_stop(struct rte_eth_dev *dev); static void i40e_dev_close(struct rte_eth_dev *dev); +static int i40e_dev_reset(struct rte_eth_dev *dev); static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev); static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev); static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev); static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev); static int i40e_dev_set_link_up(struct rte_eth_dev *dev); static int i40e_dev_set_link_down(struct rte_eth_dev *dev); -static void i40e_dev_stats_get(struct rte_eth_dev *dev, +static int i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); static int i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, unsigned n); @@ -278,7 +248,7 @@ static int i40e_vlan_filter_set(struct rte_eth_dev *dev, static int i40e_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type, uint16_t tpid); -static void i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask); +static int i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask); static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on); @@ -308,7 +278,6 @@ static int i40e_pf_parameter_init(struct rte_eth_dev *dev); static int i40e_pf_setup(struct i40e_pf *pf); static int i40e_dev_rxtx_init(struct i40e_pf *pf); static int i40e_vmdq_setup(struct rte_eth_dev *dev); -static int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb); static int i40e_dcb_setup(struct rte_eth_dev *dev); static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg, bool offset_loaded, uint64_t *offset, uint64_t *stat); @@ -360,6 +329,12 @@ static int i40e_dev_sync_phy_type(struct i40e_hw *hw); static void 
i40e_configure_registers(struct i40e_hw *hw); static void i40e_hw_init(struct rte_eth_dev *dev); static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi); +static enum i40e_status_code i40e_aq_del_mirror_rule(struct i40e_hw *hw, + uint16_t seid, + uint16_t rule_type, + uint16_t *entries, + uint16_t count, + uint16_t rule_id); static int i40e_mirror_rule_set(struct rte_eth_dev *dev, struct rte_eth_mirror_conf *mirror_conf, uint8_t sw_id, uint8_t on); @@ -394,7 +369,7 @@ static int i40e_get_eeprom_length(struct rte_eth_dev *dev); static int i40e_get_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *eeprom); -static void i40e_set_default_mac_addr(struct rte_eth_dev *dev, +static int i40e_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr); static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); @@ -449,6 +424,7 @@ static const struct eth_dev_ops i40e_eth_dev_ops = { .dev_start = i40e_dev_start, .dev_stop = i40e_dev_stop, .dev_close = i40e_dev_close, + .dev_reset = i40e_dev_reset, .promiscuous_enable = i40e_dev_promiscuous_enable, .promiscuous_disable = i40e_dev_promiscuous_disable, .allmulticast_enable = i40e_dev_allmulticast_enable, @@ -515,6 +491,7 @@ static const struct eth_dev_ops i40e_eth_dev_ops = { .get_eeprom = i40e_get_eeprom, .mac_addr_set = i40e_set_default_mac_addr, .mtu_set = i40e_dev_mtu_set, + .tm_ops_get = i40e_tm_ops_get, }; /* store statistics names and its offset in stats structure */ @@ -644,37 +621,19 @@ static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev) static struct rte_pci_driver rte_i40e_pmd = { .id_table = pci_id_i40e_map, - .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC | + RTE_PCI_DRV_IOVA_AS_VA, .probe = eth_i40e_pci_probe, .remove = eth_i40e_pci_remove, }; -static inline int -rte_i40e_dev_atomic_read_link_status(struct rte_eth_dev *dev, - struct rte_eth_link *link) -{ - struct rte_eth_link *dst = link; - struct rte_eth_link *src = &(dev->data->dev_link); - - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return -1; - - return 0; -} - -static inline int -rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev, - struct rte_eth_link *link) +static inline void +i40e_write_global_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val) { - struct rte_eth_link *dst = &(dev->data->dev_link); - struct rte_eth_link *src = link; - - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return -1; - - return 0; + i40e_write_rx_ctl(hw, reg_addr, reg_val); + PMD_DRV_LOG(DEBUG, "Global register 0x%08x is modified " + "with value 0x%08x", + reg_addr, reg_val); } RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd); @@ -694,25 +653,31 @@ RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio-pci"); static inline void i40e_GLQF_reg_init(struct i40e_hw *hw) { /* - * Initialize registers for flexible payload, which should be set by NVM. - * This should be removed from code once it is fixed in NVM. + * Initialize registers for parsing packet type of QinQ + * This should be removed from code once proper + * configuration API is added to avoid configuration conflicts + * between ports of the same device. 
*/ - I40E_WRITE_REG(hw, I40E_GLQF_ORT(18), 0x00000030); - I40E_WRITE_REG(hw, I40E_GLQF_ORT(19), 0x00000030); - I40E_WRITE_REG(hw, I40E_GLQF_ORT(26), 0x0000002B); - I40E_WRITE_REG(hw, I40E_GLQF_ORT(30), 0x0000002B); - I40E_WRITE_REG(hw, I40E_GLQF_ORT(33), 0x000000E0); - I40E_WRITE_REG(hw, I40E_GLQF_ORT(34), 0x000000E3); - I40E_WRITE_REG(hw, I40E_GLQF_ORT(35), 0x000000E6); - I40E_WRITE_REG(hw, I40E_GLQF_ORT(20), 0x00000031); - I40E_WRITE_REG(hw, I40E_GLQF_ORT(23), 0x00000031); - I40E_WRITE_REG(hw, I40E_GLQF_ORT(63), 0x0000002D); - I40E_WRITE_REG(hw, I40E_GLQF_PIT(16), 0x00007480); - I40E_WRITE_REG(hw, I40E_GLQF_PIT(17), 0x00007440); - - /* Initialize registers for parsing packet type of QinQ */ - I40E_WRITE_REG(hw, I40E_GLQF_ORT(40), 0x00000029); - I40E_WRITE_REG(hw, I40E_GLQF_PIT(9), 0x00009420); + I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(40), 0x00000029); + I40E_WRITE_GLB_REG(hw, I40E_GLQF_PIT(9), 0x00009420); + i40e_global_cfg_warning(I40E_WARNING_QINQ_PARSER); +} + +static inline void i40e_config_automask(struct i40e_pf *pf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + uint32_t val; + + /* INTENA flag is not auto-cleared for interrupt */ + val = I40E_READ_REG(hw, I40E_GLINT_CTL); + val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK | + I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK; + + /* If support multi-driver, PF will use INT0. */ + if (!pf->support_multi_driver) + val |= I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK; + + I40E_WRITE_REG(hw, I40E_GLINT_CTL, val); } #define I40E_FLOW_CONTROL_ETHERTYPE 0x8808 @@ -919,7 +884,7 @@ i40e_init_ethtype_filter_list(struct rte_eth_dev *dev) /* Initialize ethertype filter rule list and hash */ TAILQ_INIT(ðertype_rule->ethertype_list); snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE, - "ethertype_%s", dev->data->name); + "ethertype_%s", dev->device->name); ethertype_rule->hash_table = rte_hash_create(ðertype_hash_params); if (!ethertype_rule->hash_table) { PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!"); @@ -964,7 +929,7 @@ i40e_init_tunnel_filter_list(struct rte_eth_dev *dev) /* Initialize tunnel filter rule list and hash */ TAILQ_INIT(&tunnel_rule->tunnel_list); snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE, - "tunnel_%s", dev->data->name); + "tunnel_%s", dev->device->name); tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params); if (!tunnel_rule->hash_table) { PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!"); @@ -1000,7 +965,7 @@ i40e_init_fdir_filter_list(struct rte_eth_dev *dev) struct rte_hash_parameters fdir_hash_params = { .name = fdir_hash_name, .entries = I40E_MAX_FDIR_FILTER_NUM, - .key_len = sizeof(struct rte_eth_fdir_input), + .key_len = sizeof(struct i40e_fdir_input), .hash_func = rte_hash_crc, .hash_func_init_val = 0, .socket_id = rte_socket_id(), @@ -1009,7 +974,7 @@ i40e_init_fdir_filter_list(struct rte_eth_dev *dev) /* Initialize flow director filter rule list and hash */ TAILQ_INIT(&fdir_info->fdir_list); snprintf(fdir_hash_name, RTE_HASH_NAMESIZE, - "fdir_%s", dev->data->name); + "fdir_%s", dev->device->name); fdir_info->hash_table = rte_hash_create(&fdir_hash_params); if (!fdir_info->hash_table) { PMD_INIT_LOG(ERR, "Failed to create fdir hash table!"); @@ -1033,6 +998,97 @@ err_fdir_hash_map_alloc: return ret; } +static void +i40e_init_customized_info(struct i40e_pf *pf) +{ + int i; + + /* Initialize customized pctype */ + for (i = I40E_CUSTOMIZED_GTPC; i < I40E_CUSTOMIZED_MAX; i++) { + pf->customized_pctype[i].index = i; + pf->customized_pctype[i].pctype = I40E_FILTER_PCTYPE_INVALID; + pf->customized_pctype[i].valid = false; + 
} + + pf->gtp_support = false; +} + +void +i40e_init_queue_region_conf(struct rte_eth_dev *dev) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_queue_regions *info = &pf->queue_region; + uint16_t i; + + for (i = 0; i < I40E_PFQF_HREGION_MAX_INDEX; i++) + i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(i), 0); + + memset(info, 0, sizeof(struct i40e_queue_regions)); +} + +#define ETH_I40E_SUPPORT_MULTI_DRIVER "support-multi-driver" + +static int +i40e_parse_multi_drv_handler(__rte_unused const char *key, + const char *value, + void *opaque) +{ + struct i40e_pf *pf; + unsigned long support_multi_driver; + char *end; + + pf = (struct i40e_pf *)opaque; + + errno = 0; + support_multi_driver = strtoul(value, &end, 10); + if (errno != 0 || end == value || *end != 0) { + PMD_DRV_LOG(WARNING, "Wrong global configuration"); + return -(EINVAL); + } + + if (support_multi_driver == 1 || support_multi_driver == 0) + pf->support_multi_driver = (bool)support_multi_driver; + else + PMD_DRV_LOG(WARNING, "%s must be 1 or 0,", + "enable global configuration by default." + ETH_I40E_SUPPORT_MULTI_DRIVER); + return 0; +} + +static int +i40e_support_multi_driver(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + static const char *const valid_keys[] = { + ETH_I40E_SUPPORT_MULTI_DRIVER, NULL}; + struct rte_kvargs *kvlist; + + /* Enable global configuration by default */ + pf->support_multi_driver = false; + + if (!dev->device->devargs) + return 0; + + kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys); + if (!kvlist) + return -EINVAL; + + if (rte_kvargs_count(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER) > 1) + PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only " + "the first invalid or last valid one is used !", + ETH_I40E_SUPPORT_MULTI_DRIVER); + + if (rte_kvargs_process(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER, + i40e_parse_multi_drv_handler, pf) < 0) { + rte_kvargs_free(kvlist); + return -EINVAL; + } + + rte_kvargs_free(kvlist); + return 0; +} + static int eth_i40e_dev_init(struct rte_eth_dev *dev) { @@ -1065,7 +1121,6 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) intr_handle = &pci_dev->intr_handle; rte_eth_copy_pci_info(dev, pci_dev); - dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE; pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); pf->adapter->eth_dev = dev; @@ -1087,6 +1142,9 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) hw->bus.func = pci_dev->addr.function; hw->adapter_stopped = 0; + /* Check if need to support multi-driver */ + i40e_support_multi_driver(dev); + /* Make sure all is clean before doing PF reset */ i40e_clear_hw(hw); @@ -1107,13 +1165,17 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) return ret; } + i40e_config_automask(pf); + + i40e_set_default_pctype_table(dev); + /* * To work around the NVM issue, initialize registers - * for flexible payload and packet type of QinQ by - * software. It should be removed once issues are fixed - * in NVM. + * for packet type of QinQ by software. + * It should be removed once issues are fixed in NVM. 
*/ - i40e_GLQF_reg_init(hw); + if (!pf->support_multi_driver) + i40e_GLQF_reg_init(hw); /* Initialize the input set for filters (hash and fd) to default value */ i40e_filter_input_set_init(pf); @@ -1133,20 +1195,24 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) (hw->nvm.version & 0xf), hw->nvm.eetrack); /* initialise the L3_MAP register */ - ret = i40e_aq_debug_write_register(hw, I40E_GLQF_L3_MAP(40), - 0x00000028, NULL); - if (ret) - PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d", ret); + if (!pf->support_multi_driver) { + ret = i40e_aq_debug_write_register(hw, I40E_GLQF_L3_MAP(40), + 0x00000028, NULL); + if (ret) + PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d", + ret); + PMD_INIT_LOG(DEBUG, + "Global register 0x%08x is changed with 0x28", + I40E_GLQF_L3_MAP(40)); + i40e_global_cfg_warning(I40E_WARNING_QINQ_CLOUD_FILTER); + } /* Need the special FW version to support floating VEB */ config_floating_veb(dev); /* Clear PXE mode */ i40e_clear_pxe_mode(hw); - ret = i40e_dev_sync_phy_type(hw); - if (ret) { - PMD_INIT_LOG(ERR, "Failed to sync phy type: %d", ret); - goto err_sync_phy_type; - } + i40e_dev_sync_phy_type(hw); + /* * On X710, performance number is far from the expectation on recent * firmware versions. The fix for this issue may not be integrated in @@ -1215,11 +1281,15 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) i40e_set_fc(hw, &aq_fail, TRUE); /* Set the global registers with default ether type value */ - ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER, ETHER_TYPE_VLAN); - if (ret != I40E_SUCCESS) { - PMD_INIT_LOG(ERR, - "Failed to set the default outer VLAN ether type"); - goto err_setup_pf_switch; + if (!pf->support_multi_driver) { + ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER, + ETHER_TYPE_VLAN); + if (ret != I40E_SUCCESS) { + PMD_INIT_LOG(ERR, + "Failed to set the default outer " + "VLAN ether type"); + goto err_setup_pf_switch; + } } /* PF setup, which includes VSI setup */ @@ -1283,6 +1353,11 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) /* enable uio intr after callback register */ rte_intr_enable(intr_handle); + + /* By default disable flexible payload in global configuration */ + if (!pf->support_multi_driver) + i40e_flex_payload_reg_set_default(hw); + /* * Add an ethertype filter to drop all flow control frames transmitted * from VSIs. By doing so, we stop VF from sending out PAUSE or PFC @@ -1298,6 +1373,12 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) /* initialize mirror rule list */ TAILQ_INIT(&pf->mirror_list); + /* initialize Traffic Manager configuration */ + i40e_tm_conf_init(dev); + + /* Initialize customized information */ + i40e_init_customized_info(pf); + ret = i40e_init_ethtype_filter_list(dev); if (ret < 0) goto err_init_ethtype_filter_list; @@ -1308,6 +1389,13 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) if (ret < 0) goto err_init_fdir_filter_list; + /* initialize queue region configuration */ + i40e_init_queue_region_conf(dev); + + /* initialize rss configuration from rte_flow */ + memset(&pf->rss_info, 0, + sizeof(struct i40e_rte_flow_rss_conf)); + return 0; err_init_fdir_filter_list: @@ -1331,7 +1419,6 @@ err_msix_pool_init: err_qp_pool_init: err_parameter_init: err_get_capabilities: -err_sync_phy_type: (void)i40e_shutdown_adminq(hw); return ret; @@ -1395,6 +1482,18 @@ i40e_rm_fdir_filter_list(struct i40e_pf *pf) } } +void i40e_flex_payload_reg_set_default(struct i40e_hw *hw) +{ + /* + * Disable by default flexible payload + * for corresponding L2/L3/L4 layers. 
+ */ + I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33), 0x00000000); + I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(34), 0x00000000); + I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(35), 0x00000000); + i40e_global_cfg_warning(I40E_WARNING_DIS_FLX_PLD); +} + static int eth_i40e_dev_uninit(struct rte_eth_dev *dev) { @@ -1406,6 +1505,7 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev) struct rte_flow *p_flow; int ret; uint8_t aq_fail = 0; + int retries = 0; PMD_INIT_FUNC_TRACE(); @@ -1447,9 +1547,20 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev) /* disable uio intr before callback unregister */ rte_intr_disable(intr_handle); - /* register callback func to eal lib */ - rte_intr_callback_unregister(intr_handle, - i40e_dev_interrupt_handler, dev); + /* unregister callback func to eal lib */ + do { + ret = rte_intr_callback_unregister(intr_handle, + i40e_dev_interrupt_handler, dev); + if (ret >= 0) { + break; + } else if (ret != -EAGAIN) { + PMD_INIT_LOG(ERR, + "intr callback unregister failed: %d", + ret); + return ret; + } + i40e_msec_delay(500); + } while (retries++ < 5); i40e_rm_ethtype_filter_list(pf); i40e_rm_tunnel_filter_list(pf); @@ -1461,6 +1572,9 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev) rte_free(p_flow); } + /* Remove all Traffic Manager configuration */ + i40e_tm_conf_uninit(dev); + return 0; } @@ -1470,9 +1584,14 @@ i40e_dev_configure(struct rte_eth_dev *dev) struct i40e_adapter *ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode; int i, ret; + ret = i40e_dev_sync_phy_type(hw); + if (ret) + return ret; + /* Initialize to TRUE. If any of Rx queues doesn't meet the * bulk allocation or vector Rx preconditions we will reset it. 
*/ @@ -1586,16 +1705,18 @@ i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi) static void __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect, - int base_queue, int nb_queue) + int base_queue, int nb_queue, + uint16_t itr_idx) { int i; uint32_t val; struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + struct i40e_pf *pf = I40E_VSI_TO_PF(vsi); /* Bind all RX queues to allocated MSIX interrupt */ for (i = 0; i < nb_queue; i++) { val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | - I40E_QINT_RQCTL_ITR_INDX_MASK | + itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT | ((base_queue + i + 1) << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) | @@ -1609,7 +1730,8 @@ __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect, /* Write first RX queue to Link list register as the head element */ if (vsi->type != I40E_VSI_SRIOV) { uint16_t interval = - i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL); + i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL, 1, + pf->support_multi_driver); if (msix_vect == I40E_MISC_VEC_ID) { I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0, @@ -1658,7 +1780,7 @@ __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect, } void -i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi) +i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx) { struct rte_eth_dev *dev = vsi->adapter->eth_dev; struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); @@ -1668,7 +1790,6 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi) uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd); uint16_t queue_idx = 0; int record = 0; - uint32_t val; int i; for (i = 0; i < vsi->nb_qps; i++) { @@ -1676,17 +1797,11 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi) I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0); } - /* INTENA flag is not auto-cleared for interrupt */ - val = I40E_READ_REG(hw, I40E_GLINT_CTL); - val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK | - I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK | - I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK; - I40E_WRITE_REG(hw, I40E_GLINT_CTL, val); - /* VF bind interrupt */ if (vsi->type == I40E_VSI_SRIOV) { __vsi_queues_bind_intr(vsi, msix_vect, - vsi->base_queue, vsi->nb_qps); + vsi->base_queue, vsi->nb_qps, + itr_idx); return; } @@ -1712,7 +1827,8 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi) /* no enough msix_vect, map all to one */ __vsi_queues_bind_intr(vsi, msix_vect, vsi->base_queue + i, - vsi->nb_used_qps - i); + vsi->nb_used_qps - i, + itr_idx); for (; !!record && i < vsi->nb_used_qps; i++) intr_handle->intr_vec[queue_idx + i] = msix_vect; @@ -1720,7 +1836,8 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi) } /* 1:1 queue/msix_vect mapping */ __vsi_queues_bind_intr(vsi, msix_vect, - vsi->base_queue + i, 1); + vsi->base_queue + i, 1, + itr_idx); if (!!record) intr_handle->intr_vec[queue_idx + i] = msix_vect; @@ -1736,27 +1853,22 @@ i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi) struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); - uint16_t interval = i40e_calc_itr_interval(\ - RTE_LIBRTE_I40E_ITR_INTERVAL); + struct i40e_pf *pf = I40E_VSI_TO_PF(vsi); uint16_t msix_intr, i; - if (rte_intr_allow_others(intr_handle)) + if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver) for (i = 0; i < vsi->nb_msix; i++) { msix_intr = vsi->msix_intr + i; I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1), I40E_PFINT_DYN_CTLN_INTENA_MASK | I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | - (0 << 
I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) | - (interval << - I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)); + I40E_PFINT_DYN_CTLN_ITR_INDX_MASK); } else I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_INTENA_MASK | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | - (0 << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT) | - (interval << - I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT)); + I40E_PFINT_DYN_CTL0_ITR_INDX_MASK); I40E_WRITE_FLUSH(hw); } @@ -1768,16 +1880,18 @@ i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi) struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + struct i40e_pf *pf = I40E_VSI_TO_PF(vsi); uint16_t msix_intr, i; - if (rte_intr_allow_others(intr_handle)) + if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver) for (i = 0; i < vsi->nb_msix; i++) { msix_intr = vsi->msix_intr + i; I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1), - 0); + I40E_PFINT_DYN_CTLN_ITR_INDX_MASK); } else - I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0); + I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, + I40E_PFINT_DYN_CTL0_ITR_INDX_MASK); I40E_WRITE_FLUSH(hw); } @@ -1806,11 +1920,15 @@ i40e_parse_link_speeds(uint16_t link_speeds) static int i40e_phy_conf_link(struct i40e_hw *hw, uint8_t abilities, - uint8_t force_speed) + uint8_t force_speed, + bool is_up) { enum i40e_status_code status; struct i40e_aq_get_phy_abilities_resp phy_ab; struct i40e_aq_set_phy_config phy_conf; + enum i40e_aq_phy_type cnt; + uint32_t phy_type_mask = 0; + const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX | I40E_AQ_PHY_FLAG_PAUSE_RX | I40E_AQ_PHY_FLAG_PAUSE_RX | @@ -1828,6 +1946,10 @@ i40e_phy_conf_link(struct i40e_hw *hw, if (status) return ret; + /* If link already up, no need to set up again */ + if (is_up && phy_ab.phy_type != 0) + return I40E_SUCCESS; + memset(&phy_conf, 0, sizeof(phy_conf)); /* bits 0-2 use the values from get_phy_abilities_resp */ @@ -1838,13 +1960,21 @@ i40e_phy_conf_link(struct i40e_hw *hw, if (abilities & I40E_AQ_PHY_AN_ENABLED) phy_conf.link_speed = advt; else - phy_conf.link_speed = force_speed; + phy_conf.link_speed = is_up ? force_speed : phy_ab.link_speed; phy_conf.abilities = abilities; + + + /* To enable link, phy_type mask needs to include each type */ + for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_MAX; cnt++) + phy_type_mask |= 1 << cnt; + /* use get_phy_abilities_resp value for the rest */ - phy_conf.phy_type = phy_ab.phy_type; - phy_conf.phy_type_ext = phy_ab.phy_type_ext; + phy_conf.phy_type = is_up ? cpu_to_le32(phy_type_mask) : 0; + phy_conf.phy_type_ext = is_up ? 
(I40E_AQ_PHY_TYPE_EXT_25G_KR | + I40E_AQ_PHY_TYPE_EXT_25G_CR | I40E_AQ_PHY_TYPE_EXT_25G_SR | + I40E_AQ_PHY_TYPE_EXT_25G_LR) : 0; phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info; phy_conf.eee_capability = phy_ab.eee_capability; phy_conf.eeer = phy_ab.eeer_val; @@ -1876,13 +2006,7 @@ i40e_apply_link_speed(struct rte_eth_dev *dev) abilities |= I40E_AQ_PHY_AN_ENABLED; abilities |= I40E_AQ_PHY_LINK_ENABLED; - /* Skip changing speed on 40G interfaces, FW does not support */ - if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) { - speed = I40E_LINK_SPEED_UNKNOWN; - abilities |= I40E_AQ_PHY_AN_ENABLED; - } - - return i40e_phy_conf_link(hw, abilities, speed); + return i40e_phy_conf_link(hw, abilities, speed, true); } static int @@ -1900,8 +2024,9 @@ i40e_dev_start(struct rte_eth_dev *dev) hw->adapter_stopped = 0; if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) { - PMD_INIT_LOG(ERR, "Invalid link_speeds for port %hhu; autonegotiation disabled", - dev->data->port_id); + PMD_INIT_LOG(ERR, + "Invalid link_speeds for port %u, autonegotiation disabled", + dev->data->port_id); return -EINVAL; } @@ -1939,19 +2064,21 @@ i40e_dev_start(struct rte_eth_dev *dev) /* Map queues with MSIX interrupt */ main_vsi->nb_used_qps = dev->data->nb_rx_queues - pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM; - i40e_vsi_queues_bind_intr(main_vsi); + i40e_vsi_queues_bind_intr(main_vsi, I40E_ITR_INDEX_DEFAULT); i40e_vsi_enable_queues_intr(main_vsi); /* Map VMDQ VSI queues with MSIX interrupt */ for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) { pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM; - i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi); + i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi, + I40E_ITR_INDEX_DEFAULT); i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi); } /* enable FDIR MSIX interrupt */ if (pf->fdir.fdir_vsi) { - i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi); + i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi, + I40E_ITR_INDEX_NONE); i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi); } @@ -1983,6 +2110,16 @@ i40e_dev_start(struct rte_eth_dev *dev) } } + /* Enable mac loopback mode */ + if (dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_MODE_NONE || + dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_PHY_LOCAL) { + ret = i40e_aq_set_lb_modes(hw, dev->data->dev_conf.lpbk_mode, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "fail to set loopback link"); + goto err_up; + } + } + /* Apply link configure */ if (dev->data->dev_conf.link_speeds & ~(ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G | @@ -2008,7 +2145,7 @@ i40e_dev_start(struct rte_eth_dev *dev) if (dev->data->dev_conf.intr_conf.lsc != 0) PMD_INIT_LOG(INFO, "lsc won't enable because of no intr multiplex"); - } else if (dev->data->dev_conf.intr_conf.lsc != 0) { + } else { ret = i40e_aq_set_phy_int_mask(hw, ~(I40E_AQ_EVENT_LINK_UPDOWN | I40E_AQ_EVENT_MODULE_QUAL_FAIL | @@ -2016,7 +2153,7 @@ i40e_dev_start(struct rte_eth_dev *dev) if (ret != I40E_SUCCESS) PMD_DRV_LOG(WARNING, "Fail to set phy mask"); - /* Call get_link_info aq commond to enable LSE */ + /* Call get_link_info aq commond to enable/disable LSE */ i40e_dev_link_update(dev, 0); } @@ -2025,6 +2162,11 @@ i40e_dev_start(struct rte_eth_dev *dev) i40e_filter_restore(pf); + if (pf->tm_conf.root && !pf->tm_conf.committed) + PMD_DRV_LOG(WARNING, + "please call hierarchy_commit() " + "before starting the port"); + return I40E_SUCCESS; err_up: @@ -2038,12 +2180,14 @@ static void i40e_dev_stop(struct rte_eth_dev *dev) { struct i40e_pf *pf = 
I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct i40e_vsi *main_vsi = pf->main_vsi; - struct i40e_mirror_rule *p_mirror; struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; int i; + if (hw->adapter_stopped == 1) + return; /* Disable all queues */ i40e_dev_switch_queues(pf, FALSE); @@ -2066,13 +2210,6 @@ i40e_dev_stop(struct rte_eth_dev *dev) /* Set link down */ i40e_dev_set_link_down(dev); - /* Remove all mirror rules */ - while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) { - TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules); - rte_free(p_mirror); - } - pf->nb_mirror_rule = 0; - if (!rte_intr_allow_others(intr_handle)) /* resume to the default handler */ rte_intr_callback_register(intr_handle, @@ -2085,6 +2222,11 @@ i40e_dev_stop(struct rte_eth_dev *dev) rte_free(intr_handle->intr_vec); intr_handle->intr_vec = NULL; } + + /* reset hierarchy commit */ + pf->tm_conf.committed = false; + + hw->adapter_stopped = 1; } static void @@ -2094,13 +2236,34 @@ i40e_dev_close(struct rte_eth_dev *dev) struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct i40e_mirror_rule *p_mirror; uint32_t reg; int i; + int ret; PMD_INIT_FUNC_TRACE(); i40e_dev_stop(dev); - hw->adapter_stopped = 1; + + /* Remove all mirror rules */ + while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) { + ret = i40e_aq_del_mirror_rule(hw, + pf->main_vsi->veb->seid, + p_mirror->rule_type, + p_mirror->entries, + p_mirror->num_entries, + p_mirror->id); + if (ret < 0) + PMD_DRV_LOG(ERR, "failed to remove mirror rule: " + "status = %d, aq_err = %d.", ret, + hw->aq.asq_last_status); + + /* remove mirror software resource anyway */ + TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules); + rte_free(p_mirror); + pf->nb_mirror_rule--; + } + i40e_dev_free_queues(dev); /* Disable interrupt */ @@ -2128,6 +2291,10 @@ i40e_dev_close(struct rte_eth_dev *dev) i40e_res_pool_destroy(&pf->qp_pool); i40e_res_pool_destroy(&pf->msix_pool); + /* Disable flexible payload in global configuration */ + if (!pf->support_multi_driver) + i40e_flex_payload_reg_set_default(hw); + /* force a PF reset to clean anything leftover */ reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL); I40E_WRITE_REG(hw, I40E_PFGEN_CTRL, @@ -2135,6 +2302,32 @@ i40e_dev_close(struct rte_eth_dev *dev) I40E_WRITE_FLUSH(hw); } +/* + * Reset PF device only to re-initialize resources in PMD layer + */ +static int +i40e_dev_reset(struct rte_eth_dev *dev) +{ + int ret; + + /* When a DPDK PMD PF begin to reset PF port, it should notify all + * its VF to make them align with it. The detailed notification + * mechanism is PMD specific. As to i40e PF, it is rather complex. + * To avoid unexpected behavior in VF, currently reset of PF with + * SR-IOV activation is not supported. It might be supported later. 
+ */ + if (dev->data->sriov.active) + return -ENOTSUP; + + ret = eth_i40e_dev_uninit(dev); + if (ret) + return ret; + + ret = eth_i40e_dev_init(dev); + + return ret; +} + static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev) { @@ -2225,87 +2418,146 @@ i40e_dev_set_link_down(struct rte_eth_dev *dev) struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK; - return i40e_phy_conf_link(hw, abilities, speed); + return i40e_phy_conf_link(hw, abilities, speed, false); +} + +static __rte_always_inline void +update_link_no_wait(struct i40e_hw *hw, struct rte_eth_link *link) +{ +/* Link status registers and values*/ +#define I40E_PRTMAC_LINKSTA 0x001E2420 +#define I40E_REG_LINK_UP 0x40000080 +#define I40E_PRTMAC_MACC 0x001E24E0 +#define I40E_REG_MACC_25GB 0x00020000 +#define I40E_REG_SPEED_MASK 0x38000000 +#define I40E_REG_SPEED_100MB 0x00000000 +#define I40E_REG_SPEED_1GB 0x08000000 +#define I40E_REG_SPEED_10GB 0x10000000 +#define I40E_REG_SPEED_20GB 0x20000000 +#define I40E_REG_SPEED_25_40GB 0x18000000 + uint32_t link_speed; + uint32_t reg_val; + + reg_val = I40E_READ_REG(hw, I40E_PRTMAC_LINKSTA); + link_speed = reg_val & I40E_REG_SPEED_MASK; + reg_val &= I40E_REG_LINK_UP; + link->link_status = (reg_val == I40E_REG_LINK_UP) ? 1 : 0; + + if (unlikely(link->link_status != 0)) + return; + + /* Parse the link status */ + switch (link_speed) { + case I40E_REG_SPEED_100MB: + link->link_speed = ETH_SPEED_NUM_100M; + break; + case I40E_REG_SPEED_1GB: + link->link_speed = ETH_SPEED_NUM_1G; + break; + case I40E_REG_SPEED_10GB: + link->link_speed = ETH_SPEED_NUM_10G; + break; + case I40E_REG_SPEED_20GB: + link->link_speed = ETH_SPEED_NUM_20G; + break; + case I40E_REG_SPEED_25_40GB: + reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC); + + if (reg_val & I40E_REG_MACC_25GB) + link->link_speed = ETH_SPEED_NUM_25G; + else + link->link_speed = ETH_SPEED_NUM_40G; + + break; + default: + PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed); + break; + } } -int -i40e_dev_link_update(struct rte_eth_dev *dev, - int wait_to_complete) +static __rte_always_inline void +update_link_wait(struct i40e_hw *hw, struct rte_eth_link *link, + bool enable_lse) { -#define CHECK_INTERVAL 100 /* 100ms */ -#define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */ - struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); +#define CHECK_INTERVAL 100 /* 100ms */ +#define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */ + uint32_t rep_cnt = MAX_REPEAT_TIME; struct i40e_link_status link_status; - struct rte_eth_link link, old; int status; - unsigned rep_cnt = MAX_REPEAT_TIME; - bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? 
true : false; - memset(&link, 0, sizeof(link)); - memset(&old, 0, sizeof(old)); memset(&link_status, 0, sizeof(link_status)); - rte_i40e_dev_atomic_read_link_status(dev, &old); do { + memset(&link_status, 0, sizeof(link_status)); + /* Get link status information from hardware */ status = i40e_aq_get_link_info(hw, enable_lse, &link_status, NULL); - if (status != I40E_SUCCESS) { - link.link_speed = ETH_SPEED_NUM_100M; - link.link_duplex = ETH_LINK_FULL_DUPLEX; + if (unlikely(status != I40E_SUCCESS)) { + link->link_speed = ETH_SPEED_NUM_100M; + link->link_duplex = ETH_LINK_FULL_DUPLEX; PMD_DRV_LOG(ERR, "Failed to get link info"); - goto out; + return; } - link.link_status = link_status.link_info & I40E_AQ_LINK_UP; - if (!wait_to_complete || link.link_status) - break; + link->link_status = link_status.link_info & I40E_AQ_LINK_UP; + if (unlikely(link->link_status != 0)) + return; rte_delay_ms(CHECK_INTERVAL); } while (--rep_cnt); - if (!link.link_status) - goto out; - - /* i40e uses full duplex only */ - link.link_duplex = ETH_LINK_FULL_DUPLEX; - /* Parse the link status */ switch (link_status.link_speed) { case I40E_LINK_SPEED_100MB: - link.link_speed = ETH_SPEED_NUM_100M; + link->link_speed = ETH_SPEED_NUM_100M; break; case I40E_LINK_SPEED_1GB: - link.link_speed = ETH_SPEED_NUM_1G; + link->link_speed = ETH_SPEED_NUM_1G; break; case I40E_LINK_SPEED_10GB: - link.link_speed = ETH_SPEED_NUM_10G; + link->link_speed = ETH_SPEED_NUM_10G; break; case I40E_LINK_SPEED_20GB: - link.link_speed = ETH_SPEED_NUM_20G; + link->link_speed = ETH_SPEED_NUM_20G; break; case I40E_LINK_SPEED_25GB: - link.link_speed = ETH_SPEED_NUM_25G; + link->link_speed = ETH_SPEED_NUM_25G; break; case I40E_LINK_SPEED_40GB: - link.link_speed = ETH_SPEED_NUM_40G; + link->link_speed = ETH_SPEED_NUM_40G; break; default: - link.link_speed = ETH_SPEED_NUM_100M; + link->link_speed = ETH_SPEED_NUM_100M; break; } +} + +int +i40e_dev_link_update(struct rte_eth_dev *dev, + int wait_to_complete) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_link link; + bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? 
true : false; + int ret; + + memset(&link, 0, sizeof(link)); + /* i40e uses full duplex only */ + link.link_duplex = ETH_LINK_FULL_DUPLEX; link.link_autoneg = !(dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED); -out: - rte_i40e_dev_atomic_write_link_status(dev, &link); - if (link.link_status == old.link_status) - return -1; + if (!wait_to_complete) + update_link_no_wait(hw, &link); + else + update_link_wait(hw, &link, enable_lse); + ret = rte_eth_linkstatus_set(dev, &link); i40e_notify_all_vfs_link_status(dev); - return 0; + return ret; } /* Get all the statistics of a VSI */ @@ -2352,9 +2604,6 @@ i40e_update_vsi_stats(struct i40e_vsi *vsi) i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx), vsi->offset_loaded, &oes->tx_broadcast, &nes->tx_broadcast); - /* exclude CRC bytes */ - nes->tx_bytes -= (nes->tx_unicast + nes->tx_multicast + - nes->tx_broadcast) * ETHER_CRC_LEN; /* GLV_TDPC not supported */ i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded, &oes->tx_errors, &nes->tx_errors); @@ -2390,14 +2639,51 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw) i40e_stat_update_48(hw, I40E_GLV_GORCH(hw->port), I40E_GLV_GORCL(hw->port), pf->offset_loaded, - &pf->internal_rx_bytes_offset, - &pf->internal_rx_bytes); + &pf->internal_stats_offset.rx_bytes, + &pf->internal_stats.rx_bytes); i40e_stat_update_48(hw, I40E_GLV_GOTCH(hw->port), I40E_GLV_GOTCL(hw->port), pf->offset_loaded, - &pf->internal_tx_bytes_offset, - &pf->internal_tx_bytes); + &pf->internal_stats_offset.tx_bytes, + &pf->internal_stats.tx_bytes); + /* Get total internal rx packet count */ + i40e_stat_update_48(hw, I40E_GLV_UPRCH(hw->port), + I40E_GLV_UPRCL(hw->port), + pf->offset_loaded, + &pf->internal_stats_offset.rx_unicast, + &pf->internal_stats.rx_unicast); + i40e_stat_update_48(hw, I40E_GLV_MPRCH(hw->port), + I40E_GLV_MPRCL(hw->port), + pf->offset_loaded, + &pf->internal_stats_offset.rx_multicast, + &pf->internal_stats.rx_multicast); + i40e_stat_update_48(hw, I40E_GLV_BPRCH(hw->port), + I40E_GLV_BPRCL(hw->port), + pf->offset_loaded, + &pf->internal_stats_offset.rx_broadcast, + &pf->internal_stats.rx_broadcast); + /* Get total internal tx packet count */ + i40e_stat_update_48(hw, I40E_GLV_UPTCH(hw->port), + I40E_GLV_UPTCL(hw->port), + pf->offset_loaded, + &pf->internal_stats_offset.tx_unicast, + &pf->internal_stats.tx_unicast); + i40e_stat_update_48(hw, I40E_GLV_MPTCH(hw->port), + I40E_GLV_MPTCL(hw->port), + pf->offset_loaded, + &pf->internal_stats_offset.tx_multicast, + &pf->internal_stats.tx_multicast); + i40e_stat_update_48(hw, I40E_GLV_BPTCH(hw->port), + I40E_GLV_BPTCL(hw->port), + pf->offset_loaded, + &pf->internal_stats_offset.tx_broadcast, + &pf->internal_stats.tx_broadcast); + + /* exclude CRC size */ + pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast + + pf->internal_stats.rx_multicast + + pf->internal_stats.rx_broadcast) * ETHER_CRC_LEN; /* Get statistics of struct i40e_eth_stats */ i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port), @@ -2420,7 +2706,33 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw) * so subtract ETHER_CRC_LEN from the byte counter for each rx packet. */ ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast + - ns->eth.rx_broadcast) * ETHER_CRC_LEN + pf->internal_rx_bytes; + ns->eth.rx_broadcast) * ETHER_CRC_LEN; + + /* exclude internal rx bytes + * Workaround: it is possible I40E_GLV_GORCH[H/L] is updated before + * I40E_GLPRT_GORCH[H/L], so there is a small window that cause negative + * value. 
+ * same to I40E_GLV_UPRC[H/L], I40E_GLV_MPRC[H/L], I40E_GLV_BPRC[H/L]. + */ + if (ns->eth.rx_bytes < pf->internal_stats.rx_bytes) + ns->eth.rx_bytes = 0; + else + ns->eth.rx_bytes -= pf->internal_stats.rx_bytes; + + if (ns->eth.rx_unicast < pf->internal_stats.rx_unicast) + ns->eth.rx_unicast = 0; + else + ns->eth.rx_unicast -= pf->internal_stats.rx_unicast; + + if (ns->eth.rx_multicast < pf->internal_stats.rx_multicast) + ns->eth.rx_multicast = 0; + else + ns->eth.rx_multicast -= pf->internal_stats.rx_multicast; + + if (ns->eth.rx_broadcast < pf->internal_stats.rx_broadcast) + ns->eth.rx_broadcast = 0; + else + ns->eth.rx_broadcast -= pf->internal_stats.rx_broadcast; i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port), pf->offset_loaded, &os->eth.rx_discards, @@ -2448,7 +2760,34 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw) pf->offset_loaded, &os->eth.tx_broadcast, &ns->eth.tx_broadcast); ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast + - ns->eth.tx_broadcast) * ETHER_CRC_LEN + pf->internal_tx_bytes; + ns->eth.tx_broadcast) * ETHER_CRC_LEN; + + /* exclude internal tx bytes + * Workaround: it is possible I40E_GLV_GOTCH[H/L] is updated before + * I40E_GLPRT_GOTCH[H/L], so there is a small window that cause negative + * value. + * same to I40E_GLV_UPTC[H/L], I40E_GLV_MPTC[H/L], I40E_GLV_BPTC[H/L]. + */ + if (ns->eth.tx_bytes < pf->internal_stats.tx_bytes) + ns->eth.tx_bytes = 0; + else + ns->eth.tx_bytes -= pf->internal_stats.tx_bytes; + + if (ns->eth.tx_unicast < pf->internal_stats.tx_unicast) + ns->eth.tx_unicast = 0; + else + ns->eth.tx_unicast -= pf->internal_stats.tx_unicast; + + if (ns->eth.tx_multicast < pf->internal_stats.tx_multicast) + ns->eth.tx_multicast = 0; + else + ns->eth.tx_multicast -= pf->internal_stats.tx_multicast; + + if (ns->eth.tx_broadcast < pf->internal_stats.tx_broadcast) + ns->eth.tx_broadcast = 0; + else + ns->eth.tx_broadcast -= pf->internal_stats.tx_broadcast; + /* GLPRT_TEPC not supported */ /* additional port specific stats */ @@ -2588,7 +2927,7 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw) } /* Get all statistics of a port */ -static void +static int i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); @@ -2599,13 +2938,14 @@ i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) /* call read registers - updates values, now write them to struct */ i40e_read_stats_registers(pf, hw); - stats->ipackets = pf->main_vsi->eth_stats.rx_unicast + - pf->main_vsi->eth_stats.rx_multicast + - pf->main_vsi->eth_stats.rx_broadcast - + stats->ipackets = ns->eth.rx_unicast + + ns->eth.rx_multicast + + ns->eth.rx_broadcast - + ns->eth.rx_discards - pf->main_vsi->eth_stats.rx_discards; - stats->opackets = pf->main_vsi->eth_stats.tx_unicast + - pf->main_vsi->eth_stats.tx_multicast + - pf->main_vsi->eth_stats.tx_broadcast; + stats->opackets = ns->eth.tx_unicast + + ns->eth.tx_multicast + + ns->eth.tx_broadcast; stats->ibytes = ns->eth.rx_bytes; stats->obytes = ns->eth.tx_bytes; stats->oerrors = ns->eth.tx_errors + @@ -2687,6 +3027,7 @@ i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) ns->checksum_error); PMD_DRV_LOG(DEBUG, "fdir_match: %"PRIu64"", ns->fd_sb_match); PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************"); + return 0; } /* Reset the statistics */ @@ -2871,19 +3212,25 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) struct 
i40e_vsi *vsi = pf->main_vsi; struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); - dev_info->pci_dev = pci_dev; dev_info->max_rx_queues = vsi->nb_qps; dev_info->max_tx_queues = vsi->nb_qps; dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN; dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX; dev_info->max_mac_addrs = vsi->max_macaddrs; dev_info->max_vfs = pci_dev->max_vfs; + dev_info->rx_queue_offload_capa = 0; dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | DEV_RX_OFFLOAD_QINQ_STRIP | DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM; + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_RX_OFFLOAD_CRC_STRIP | + DEV_RX_OFFLOAD_VLAN_EXTEND | + DEV_RX_OFFLOAD_VLAN_FILTER; + + dev_info->tx_queue_offload_capa = 0; dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_QINQ_INSERT | @@ -2900,7 +3247,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t); dev_info->reta_size = pf->hash_lut_size; - dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL; + dev_info->flow_type_rss_offloads = pf->adapter->flow_types_mask; dev_info->default_rxconf = (struct rte_eth_rxconf) { .rx_thresh = { @@ -2910,6 +3257,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) }, .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH, .rx_drop_en = 0, + .offloads = 0, }; dev_info->default_txconf = (struct rte_eth_txconf) { @@ -2948,15 +3296,42 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->max_tx_queues += dev_info->vmdq_queue_num; } - if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) + if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) { /* For XL710 */ dev_info->speed_capa = ETH_LINK_SPEED_40G; - else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) + dev_info->default_rxportconf.nb_queues = 2; + dev_info->default_txportconf.nb_queues = 2; + if (dev->data->nb_rx_queues == 1) + dev_info->default_rxportconf.ring_size = 2048; + else + dev_info->default_rxportconf.ring_size = 1024; + if (dev->data->nb_tx_queues == 1) + dev_info->default_txportconf.ring_size = 1024; + else + dev_info->default_txportconf.ring_size = 512; + + } else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) { /* For XXV710 */ dev_info->speed_capa = ETH_LINK_SPEED_25G; - else + dev_info->default_rxportconf.nb_queues = 1; + dev_info->default_txportconf.nb_queues = 1; + dev_info->default_rxportconf.ring_size = 256; + dev_info->default_txportconf.ring_size = 256; + } else { /* For X710 */ dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G; + dev_info->default_rxportconf.nb_queues = 1; + dev_info->default_txportconf.nb_queues = 1; + if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_10G) { + dev_info->default_rxportconf.ring_size = 512; + dev_info->default_txportconf.ring_size = 256; + } else { + dev_info->default_rxportconf.ring_size = 256; + dev_info->default_txportconf.ring_size = 256; + } + } + dev_info->default_rxportconf.burst_size = 32; + dev_info->default_txportconf.burst_size = 32; } static int @@ -2973,83 +3348,116 @@ i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) } static int -i40e_vlan_tpid_set(struct rte_eth_dev *dev, - enum rte_vlan_type vlan_type, - uint16_t tpid) +i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev, + enum rte_vlan_type vlan_type, + uint16_t tpid, int qinq) { struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint64_t reg_r 
= 0, reg_w = 0; - uint16_t reg_id = 0; - int ret = 0; - int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend; + uint64_t reg_r = 0; + uint64_t reg_w = 0; + uint16_t reg_id = 3; + int ret; - switch (vlan_type) { - case ETH_VLAN_TYPE_OUTER: - if (qinq) + if (qinq) { + if (vlan_type == ETH_VLAN_TYPE_OUTER) reg_id = 2; - else - reg_id = 3; - break; - case ETH_VLAN_TYPE_INNER: - if (qinq) - reg_id = 3; - else { - ret = -EINVAL; - PMD_DRV_LOG(ERR, - "Unsupported vlan type in single vlan."); - return ret; - } - break; - default: - ret = -EINVAL; - PMD_DRV_LOG(ERR, "Unsupported vlan type %d", vlan_type); - return ret; } + ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id), ®_r, NULL); if (ret != I40E_SUCCESS) { PMD_DRV_LOG(ERR, "Fail to debug read from I40E_GL_SWT_L2TAGCTRL[%d]", reg_id); - ret = -EIO; - return ret; + return -EIO; } PMD_DRV_LOG(DEBUG, - "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64, - reg_id, reg_r); + "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64, + reg_id, reg_r); reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK)); reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT); if (reg_r == reg_w) { - ret = 0; PMD_DRV_LOG(DEBUG, "No need to write"); - return ret; + return 0; } ret = i40e_aq_debug_write_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id), reg_w, NULL); if (ret != I40E_SUCCESS) { - ret = -EIO; PMD_DRV_LOG(ERR, - "Fail to debug write to I40E_GL_SWT_L2TAGCTRL[%d]", - reg_id); - return ret; + "Fail to debug write to I40E_GL_SWT_L2TAGCTRL[%d]", + reg_id); + return -EIO; } PMD_DRV_LOG(DEBUG, - "Debug write 0x%08"PRIx64" to I40E_GL_SWT_L2TAGCTRL[%d]", - reg_w, reg_id); + "Global register 0x%08x is changed with value 0x%08x", + I40E_GL_SWT_L2TAGCTRL(reg_id), (uint32_t)reg_w); + + return 0; +} + +static int +i40e_vlan_tpid_set(struct rte_eth_dev *dev, + enum rte_vlan_type vlan_type, + uint16_t tpid) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + int qinq = dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_EXTEND; + int ret = 0; + + if ((vlan_type != ETH_VLAN_TYPE_INNER && + vlan_type != ETH_VLAN_TYPE_OUTER) || + (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) { + PMD_DRV_LOG(ERR, + "Unsupported vlan type."); + return -EINVAL; + } + + if (pf->support_multi_driver) { + PMD_DRV_LOG(ERR, "Setting TPID is not supported."); + return -ENOTSUP; + } + + /* 802.1ad frames ability is added in NVM API 1.7*/ + if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) { + if (qinq) { + if (vlan_type == ETH_VLAN_TYPE_OUTER) + hw->first_tag = rte_cpu_to_le_16(tpid); + else if (vlan_type == ETH_VLAN_TYPE_INNER) + hw->second_tag = rte_cpu_to_le_16(tpid); + } else { + if (vlan_type == ETH_VLAN_TYPE_OUTER) + hw->second_tag = rte_cpu_to_le_16(tpid); + } + ret = i40e_aq_set_switch_config(hw, 0, 0, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, + "Set switch config failed aq_err: %d", + hw->aq.asq_last_status); + ret = -EIO; + } + } else + /* If NVM API < 1.7, keep the register setting */ + ret = i40e_vlan_tpid_set_by_registers(dev, vlan_type, + tpid, qinq); + i40e_global_cfg_warning(I40E_WARNING_TPID); return ret; } -static void +static int i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_vsi *vsi = pf->main_vsi; + struct rte_eth_rxmode *rxmode; + rxmode = &dev->data->dev_conf.rxmode; if (mask & ETH_VLAN_FILTER_MASK) { - if 
(dev->data->dev_conf.rxmode.hw_vlan_filter) + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) i40e_vsi_config_vlan_filter(vsi, TRUE); else i40e_vsi_config_vlan_filter(vsi, FALSE); @@ -3057,16 +3465,16 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask) if (mask & ETH_VLAN_STRIP_MASK) { /* Enable or disable VLAN stripping */ - if (dev->data->dev_conf.rxmode.hw_vlan_strip) + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) i40e_vsi_config_vlan_stripping(vsi, TRUE); else i40e_vsi_config_vlan_stripping(vsi, FALSE); } if (mask & ETH_VLAN_EXTEND_MASK) { - if (dev->data->dev_conf.rxmode.hw_vlan_extend) { + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) { i40e_vsi_config_double_vlan(vsi, TRUE); - /* Set global registers with default ether type value */ + /* Set global registers with default ethertype. */ i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER, ETHER_TYPE_VLAN); i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER, @@ -3075,6 +3483,8 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask) else i40e_vsi_config_double_vlan(vsi, FALSE); } + + return 0; } static void @@ -3138,6 +3548,13 @@ i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); fc_conf->pause_time = pf->fc_conf.pause_time; + + /* read out from register, in case they are modified by other port */ + pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = + I40E_READ_REG(hw, I40E_GLRPB_GHW) >> I40E_KILOSHIFT; + pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = + I40E_READ_REG(hw, I40E_GLRPB_GLW) >> I40E_KILOSHIFT; + fc_conf->high_water = pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]; fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]; @@ -3258,19 +3675,25 @@ i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg); } - /* config the water marker both based on the packets and bytes */ - I40E_WRITE_REG(hw, I40E_GLRPB_PHW, - (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] - << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE); - I40E_WRITE_REG(hw, I40E_GLRPB_PLW, - (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] - << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE); - I40E_WRITE_REG(hw, I40E_GLRPB_GHW, - pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] - << I40E_KILOSHIFT); - I40E_WRITE_REG(hw, I40E_GLRPB_GLW, - pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] - << I40E_KILOSHIFT); + if (!pf->support_multi_driver) { + /* config water marker both based on the packets and bytes */ + I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PHW, + (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] + << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE); + I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PLW, + (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] + << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE); + I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GHW, + pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] + << I40E_KILOSHIFT); + I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GLW, + pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] + << I40E_KILOSHIFT); + i40e_global_cfg_warning(I40E_WARNING_FLOW_CTL); + } else { + PMD_DRV_LOG(ERR, + "Water marker configuration is not supported."); + } I40E_WRITE_FLUSH(hw); @@ -3296,6 +3719,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev, struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_mac_filter_info mac_filter; struct i40e_vsi *vsi; + struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; int ret; /* If VMDQ not enabled or configured, return */ @@ -3313,8 +3737,8 @@ 
i40e_macaddr_add(struct rte_eth_dev *dev, return -EINVAL; } - (void)rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN); - if (dev->data->dev_conf.rxmode.hw_vlan_filter) + rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN); + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; else mac_filter.filter_type = RTE_MAC_PERFECT_MATCH; @@ -3418,10 +3842,10 @@ i40e_vf_mac_filter_set(struct i40e_pf *pf, } if (add) { - (void)rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN); - (void)rte_memcpy(hw->mac.addr, new_mac->addr_bytes, + rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN); + rte_memcpy(hw->mac.addr, new_mac->addr_bytes, ETHER_ADDR_LEN); - (void)rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr, + rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr, ETHER_ADDR_LEN); mac_filter.filter_type = filter->filter_type; @@ -3432,7 +3856,7 @@ i40e_vf_mac_filter_set(struct i40e_pf *pf, } ether_addr_copy(new_mac, &pf->dev_addr); } else { - (void)rte_memcpy(hw->mac.addr, hw->mac.perm_addr, + rte_memcpy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN); ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr); if (ret != I40E_SUCCESS) { @@ -3490,6 +3914,7 @@ i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size) { struct i40e_pf *pf = I40E_VSI_TO_PF(vsi); struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + uint32_t reg; int ret; if (!lut) @@ -3506,14 +3931,22 @@ i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size) uint32_t *lut_dw = (uint32_t *)lut; uint16_t i, lut_size_dw = lut_size / 4; - for (i = 0; i < lut_size_dw; i++) - lut_dw[i] = I40E_READ_REG(hw, I40E_PFQF_HLUT(i)); + if (vsi->type == I40E_VSI_SRIOV) { + for (i = 0; i <= lut_size_dw; i++) { + reg = I40E_VFQF_HLUT1(i, vsi->user_param); + lut_dw[i] = i40e_read_rx_ctl(hw, reg); + } + } else { + for (i = 0; i < lut_size_dw; i++) + lut_dw[i] = I40E_READ_REG(hw, + I40E_PFQF_HLUT(i)); + } } return 0; } -static int +int i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size) { struct i40e_pf *pf; @@ -3537,8 +3970,17 @@ i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size) uint32_t *lut_dw = (uint32_t *)lut; uint16_t i, lut_size_dw = lut_size / 4; - for (i = 0; i < lut_size_dw; i++) - I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i), lut_dw[i]); + if (vsi->type == I40E_VSI_SRIOV) { + for (i = 0; i < lut_size_dw; i++) + I40E_WRITE_REG( + hw, + I40E_VFQF_HLUT1(i, vsi->user_param), + lut_dw[i]); + } else { + for (i = 0; i < lut_size_dw; i++) + I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i), + lut_dw[i]); + } I40E_WRITE_FLUSH(hw); } @@ -3647,14 +4089,14 @@ i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw, return I40E_ERR_PARAM; snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand()); - mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0, - alignment, RTE_PGSIZE_2M); + mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, + RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M); if (!mz) return I40E_ERR_NO_MEMORY; mem->size = size; mem->va = mz->addr; - mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr); + mem->pa = mz->iova; mem->zone = (const void *)mz; PMD_DRV_LOG(DEBUG, "memzone %s allocated with physical address: %"PRIu64, @@ -3783,6 +4225,68 @@ i40e_get_cap(struct i40e_hw *hw) return ret; } +#define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF 4 +#define QUEUE_NUM_PER_VF_ARG "queue-num-per-vf" + +static int i40e_pf_parse_vf_queue_number_handler(const char *key, + const char *value, + void *opaque) +{ + struct i40e_pf 
*pf; + unsigned long num; + char *end; + + pf = (struct i40e_pf *)opaque; + RTE_SET_USED(key); + + errno = 0; + num = strtoul(value, &end, 0); + if (errno != 0 || end == value || *end != 0) { + PMD_DRV_LOG(WARNING, "Wrong VF queue number = %s, Now it is " + "kept the value = %hu", value, pf->vf_nb_qp_max); + return -(EINVAL); + } + + if (num <= I40E_MAX_QP_NUM_PER_VF && rte_is_power_of_2(num)) + pf->vf_nb_qp_max = (uint16_t)num; + else + /* here return 0 to make next valid same argument work */ + PMD_DRV_LOG(WARNING, "Wrong VF queue number = %lu, it must be " + "power of 2 and equal or less than 16 !, Now it is " + "kept the value = %hu", num, pf->vf_nb_qp_max); + + return 0; +} + +static int i40e_pf_config_vf_rxq_number(struct rte_eth_dev *dev) +{ + static const char * const valid_keys[] = {QUEUE_NUM_PER_VF_ARG, NULL}; + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct rte_kvargs *kvlist; + + /* set default queue number per VF as 4 */ + pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF; + + if (dev->device->devargs == NULL) + return 0; + + kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys); + if (kvlist == NULL) + return -(EINVAL); + + if (rte_kvargs_count(kvlist, QUEUE_NUM_PER_VF_ARG) > 1) + PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only " + "the first invalid or last valid one is used !", + QUEUE_NUM_PER_VF_ARG); + + rte_kvargs_process(kvlist, QUEUE_NUM_PER_VF_ARG, + i40e_pf_parse_vf_queue_number_handler, pf); + + rte_kvargs_free(kvlist); + + return 0; +} + static int i40e_pf_parameter_init(struct rte_eth_dev *dev) { @@ -3795,6 +4299,9 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev) PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV"); return -EINVAL; } + + i40e_pf_config_vf_rxq_number(dev); + /* Add the parameter init for LFC */ pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME; pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER; @@ -3804,7 +4311,6 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev) pf->max_num_vsi = hw->func_caps.num_vsis; pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF; pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM; - pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF; /* FDir queue/VSI allocation */ pf->fdir_qp_offset = 0; @@ -3834,7 +4340,7 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev) pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps; if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) { pf->flags |= I40E_FLAG_SRIOV; - pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF; + pf->vf_nb_qps = pf->vf_nb_qp_max; pf->vf_num = pci_dev->max_vfs; PMD_DRV_LOG(DEBUG, "%u VF VSIs, %u queues per VF VSI, in total %u queues", @@ -4224,7 +4730,7 @@ i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi, vsi->info.valid_sections = rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID); memset(&ctxt, 0, sizeof(ctxt)); - (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); + rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); ctxt.seid = vsi->seid; hw = I40E_VSI_TO_HW(vsi); @@ -4263,7 +4769,7 @@ i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap) return ret; } - (void)rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles, + rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles, sizeof(vsi->info.qs_handle)); return I40E_SUCCESS; } @@ -4284,6 +4790,8 @@ i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi, for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) if (enabled_tcmap & (1 << i)) total_tc++; + if (total_tc == 0) + total_tc = 1; vsi->enabled_tc = enabled_tcmap; /* 
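The two functions above add a per-device argument, queue-num-per-vf, which must be a power of two no larger than 16 (I40E_MAX_QP_NUM_PER_VF) and defaults to 4; invalid values only trigger a warning and the previous value is kept. As a usage illustration only (the PCI address is an example), the argument is supplied together with the device on the EAL command line:

	./testpmd -w 0000:02:00.0,queue-num-per-vf=8 -- -i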
Number of queues per enabled TC */ @@ -4518,7 +5026,7 @@ i40e_update_default_filter_setting(struct i40e_vsi *vsi) if (vsi->type != I40E_VSI_MAIN) return I40E_ERR_CONFIG; memset(&def_filter, 0, sizeof(def_filter)); - (void)rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr, + rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr, ETH_ADDR_LEN); def_filter.vlan_tag = 0; def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH | @@ -4537,7 +5045,7 @@ i40e_update_default_filter_setting(struct i40e_vsi *vsi) return I40E_ERR_NO_MEMORY; } mac = &f->mac_info.mac_addr; - (void)rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr, + rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr, ETH_ADDR_LEN); f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH; TAILQ_INSERT_TAIL(&vsi->mac_list, f, next); @@ -4545,7 +5053,7 @@ i40e_update_default_filter_setting(struct i40e_vsi *vsi) return ret; } - (void)rte_memcpy(&filter.mac_addr, + rte_memcpy(&filter.mac_addr, (struct ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN); filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; return i40e_vsi_add_mac(vsi, &filter); @@ -4760,16 +5268,28 @@ i40e_vsi_setup(struct i40e_pf *pf, /* VF has MSIX interrupt in VF range, don't allocate here */ if (type == I40E_VSI_MAIN) { - ret = i40e_res_pool_alloc(&pf->msix_pool, - RTE_MIN(vsi->nb_qps, - RTE_MAX_RXTX_INTR_VEC_ID)); - if (ret < 0) { - PMD_DRV_LOG(ERR, "VSI MAIN %d get heap failed %d", - vsi->seid, ret); - goto fail_queue_alloc; + if (pf->support_multi_driver) { + /* If support multi-driver, need to use INT0 instead of + * allocating from msix pool. The Msix pool is init from + * INT1, so it's OK just set msix_intr to 0 and nb_msix + * to 1 without calling i40e_res_pool_alloc. + */ + vsi->msix_intr = 0; + vsi->nb_msix = 1; + } else { + ret = i40e_res_pool_alloc(&pf->msix_pool, + RTE_MIN(vsi->nb_qps, + RTE_MAX_RXTX_INTR_VEC_ID)); + if (ret < 0) { + PMD_DRV_LOG(ERR, + "VSI MAIN %d get heap failed %d", + vsi->seid, ret); + goto fail_queue_alloc; + } + vsi->msix_intr = ret; + vsi->nb_msix = RTE_MIN(vsi->nb_qps, + RTE_MAX_RXTX_INTR_VEC_ID); } - vsi->msix_intr = ret; - vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID); } else if (type != I40E_VSI_SRIOV) { ret = i40e_res_pool_alloc(&pf->msix_pool, 1); if (ret < 0) { @@ -4806,7 +5326,7 @@ i40e_vsi_setup(struct i40e_pf *pf, PMD_DRV_LOG(ERR, "Failed to get VSI params"); goto fail_msix_alloc; } - (void)rte_memcpy(&vsi->info, &ctxt.info, + rte_memcpy(&vsi->info, &ctxt.info, sizeof(struct i40e_aqc_vsi_properties_data)); vsi->vsi_id = ctxt.vsi_number; vsi->info.valid_sections = 0; @@ -4824,7 +5344,7 @@ i40e_vsi_setup(struct i40e_pf *pf, rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID); vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH; - (void)rte_memcpy(&ctxt.info, &vsi->info, + rte_memcpy(&ctxt.info, &vsi->info, sizeof(struct i40e_aqc_vsi_properties_data)); ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info, I40E_DEFAULT_TCMAP); @@ -4845,15 +5365,15 @@ i40e_vsi_setup(struct i40e_pf *pf, goto fail_msix_alloc; } - (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping, + rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping, sizeof(vsi->info.tc_mapping)); - (void)rte_memcpy(&vsi->info.queue_mapping, + rte_memcpy(&vsi->info.queue_mapping, &ctxt.info.queue_mapping, sizeof(vsi->info.queue_mapping)); vsi->info.mapping_flags = ctxt.info.mapping_flags; vsi->info.valid_sections = 0; - (void)rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr, + rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr, 
ETH_ADDR_LEN); /** @@ -4996,7 +5516,7 @@ i40e_vsi_setup(struct i40e_pf *pf, } /* MAC/VLAN configuration */ - (void)rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN); + rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN); filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; ret = i40e_vsi_add_mac(vsi, &filter); @@ -5108,7 +5628,7 @@ i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on) vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK); vsi->info.port_vlan_flags |= vlan_flags; ctxt.seid = vsi->seid; - (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); + rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (ret) PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping", @@ -5125,10 +5645,14 @@ i40e_dev_init_vlan(struct rte_eth_dev *dev) int mask = 0; /* Apply vlan offload setting */ - mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK; - i40e_vlan_offload_set(dev, mask); - - /* Apply double-vlan setting, not implemented yet */ + mask = ETH_VLAN_STRIP_MASK | + ETH_VLAN_FILTER_MASK | + ETH_VLAN_EXTEND_MASK; + ret = i40e_vlan_offload_set(dev, mask); + if (ret) { + PMD_DRV_LOG(INFO, "Failed to update vlan offload"); + return ret; + } /* Apply pvid setting */ ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid, @@ -5216,10 +5740,8 @@ i40e_pf_setup(struct i40e_pf *pf) pf->offset_loaded = FALSE; memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats)); memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats)); - pf->internal_rx_bytes = 0; - pf->internal_tx_bytes = 0; - pf->internal_rx_bytes_offset = 0; - pf->internal_tx_bytes_offset = 0; + memset(&pf->internal_stats, 0, sizeof(struct i40e_eth_stats)); + memset(&pf->internal_stats_offset, 0, sizeof(struct i40e_eth_stats)); ret = i40e_pf_get_switch_config(pf); if (ret != I40E_SUCCESS) { @@ -5684,7 +6206,8 @@ void i40e_pf_disable_irq0(struct i40e_hw *hw) { /* Disable all interrupt types */ - I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0); + I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, + I40E_PFINT_DYN_CTL0_ITR_INDX_MASK); I40E_WRITE_FLUSH(hw); } @@ -5734,16 +6257,16 @@ i40e_dev_handle_vfr_event(struct rte_eth_dev *dev) index = abs_vf_id / I40E_UINT32_BIT_SIZE; offset = abs_vf_id % I40E_UINT32_BIT_SIZE; val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index)); - /* VFR event occured */ + /* VFR event occurred */ if (val & (0x1 << offset)) { int ret; /* Clear the event first */ I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index), (0x1 << offset)); - PMD_DRV_LOG(INFO, "VF %u reset occured", abs_vf_id); + PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id); /** - * Only notify a VF reset event occured, + * Only notify a VF reset event occurred, * don't trigger another SW reset */ ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0); @@ -5904,7 +6427,7 @@ i40e_add_macvlan_filters(struct i40e_vsi *vsi, memset(req_list, 0, ele_buff_size); for (i = 0; i < actual_num; i++) { - (void)rte_memcpy(req_list[i].mac_addr, + rte_memcpy(req_list[i].mac_addr, &filter[num + i].macaddr, ETH_ADDR_LEN); req_list[i].vlan_tag = rte_cpu_to_le_16(filter[num + i].vlan_id); @@ -5979,7 +6502,7 @@ i40e_remove_macvlan_filters(struct i40e_vsi *vsi, memset(req_list, 0, ele_buff_size); for (i = 0; i < actual_num; i++) { - (void)rte_memcpy(req_list[i].mac_addr, + rte_memcpy(req_list[i].mac_addr, &filter[num + i].macaddr, ETH_ADDR_LEN); req_list[i].vlan_tag = rte_cpu_to_le_16(filter[num + i].vlan_id); @@ -6130,7 +6653,7 @@ i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi, "vlan number doesn't match"); return 
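i40e_dev_init_vlan above now applies the strip, filter and extend settings in a single i40e_vlan_offload_set() call and propagates any failure. The same hook is reached from an application through the generic ethdev VLAN offload API; a hedged sketch, with port 0 as the example:

	#include <rte_ethdev.h>

	static int
	example_enable_vlan_offloads(uint16_t port_id)
	{
		/* Desired state: stripping and filtering on, extend (QinQ) off. */
		int offloads = ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_FILTER_OFFLOAD;

		/* ethdev works out which bits changed and invokes the PMD's
		 * vlan_offload_set callback with the matching *_MASK bits. */
		return rte_eth_dev_set_vlan_offload(port_id, offloads);
	}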
I40E_ERR_PARAM; } - (void)rte_memcpy(&mv_f[i].macaddr, + rte_memcpy(&mv_f[i].macaddr, addr, ETH_ADDR_LEN); mv_f[i].vlan_id = j * I40E_UINT32_BIT_SIZE + k; @@ -6159,7 +6682,7 @@ i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi, PMD_DRV_LOG(ERR, "buffer number not match"); return I40E_ERR_PARAM; } - (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr, + rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr, ETH_ADDR_LEN); mv_f[i].vlan_id = vlan; mv_f[i].filter_type = f->mac_info.filter_type; @@ -6195,7 +6718,7 @@ i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi) i = 0; if (vsi->vlan_num == 0) { TAILQ_FOREACH(f, &vsi->mac_list, next) { - (void)rte_memcpy(&mv_f[i].macaddr, + rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr, ETH_ADDR_LEN); mv_f[i].filter_type = f->mac_info.filter_type; mv_f[i].vlan_id = 0; @@ -6365,7 +6888,7 @@ i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter) for (i = 0; i < vlan_num; i++) { mv_f[i].filter_type = mac_filter->filter_type; - (void)rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr, + rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr, ETH_ADDR_LEN); } @@ -6388,7 +6911,7 @@ i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter) ret = I40E_ERR_NO_MEMORY; goto DONE; } - (void)rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr, + rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr, ETH_ADDR_LEN); f->mac_info.filter_type = mac_filter->filter_type; TAILQ_INSERT_TAIL(&vsi->mac_list, f, next); @@ -6435,7 +6958,7 @@ i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr) for (i = 0; i < vlan_num; i++) { mv_f[i].filter_type = filter_type; - (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr, + rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr, ETH_ADDR_LEN); } if (filter_type == RTE_MACVLAN_PERFECT_MATCH || @@ -6462,104 +6985,36 @@ DONE: /* Configure hash enable flags for RSS */ uint64_t -i40e_config_hena(uint64_t flags, enum i40e_mac_type type) +i40e_config_hena(const struct i40e_adapter *adapter, uint64_t flags) { uint64_t hena = 0; + int i; if (!flags) return hena; - if (flags & ETH_RSS_FRAG_IPV4) - hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4; - if (flags & ETH_RSS_NONFRAG_IPV4_TCP) { - if (type == I40E_MAC_X722) { - hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | - (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK); - } else - hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP; - } - if (flags & ETH_RSS_NONFRAG_IPV4_UDP) { - if (type == I40E_MAC_X722) { - hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | - (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | - (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP); - } else - hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP; - } - if (flags & ETH_RSS_NONFRAG_IPV4_SCTP) - hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP; - if (flags & ETH_RSS_NONFRAG_IPV4_OTHER) - hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER; - if (flags & ETH_RSS_FRAG_IPV6) - hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6; - if (flags & ETH_RSS_NONFRAG_IPV6_TCP) { - if (type == I40E_MAC_X722) { - hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | - (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK); - } else - hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP; - } - if (flags & ETH_RSS_NONFRAG_IPV6_UDP) { - if (type == I40E_MAC_X722) { - hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | - (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | - (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP); - } else - hena |= 1ULL << 
I40E_FILTER_PCTYPE_NONF_IPV6_UDP; + for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) { + if (flags & (1ULL << i)) + hena |= adapter->pctypes_tbl[i]; } - if (flags & ETH_RSS_NONFRAG_IPV6_SCTP) - hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP; - if (flags & ETH_RSS_NONFRAG_IPV6_OTHER) - hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER; - if (flags & ETH_RSS_L2_PAYLOAD) - hena |= 1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD; return hena; } /* Parse the hash enable flags */ uint64_t -i40e_parse_hena(uint64_t flags) +i40e_parse_hena(const struct i40e_adapter *adapter, uint64_t flags) { uint64_t rss_hf = 0; if (!flags) return rss_hf; - if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4)) - rss_hf |= ETH_RSS_FRAG_IPV4; - if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP)) - rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP; - if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK)) - rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP; - if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP)) - rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP; - if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP)) - rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP; - if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP)) - rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP; - if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP)) - rss_hf |= ETH_RSS_NONFRAG_IPV4_SCTP; - if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER)) - rss_hf |= ETH_RSS_NONFRAG_IPV4_OTHER; - if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6)) - rss_hf |= ETH_RSS_FRAG_IPV6; - if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP)) - rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP; - if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK)) - rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP; - if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP)) - rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP; - if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP)) - rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP; - if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP)) - rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP; - if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP)) - rss_hf |= ETH_RSS_NONFRAG_IPV6_SCTP; - if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER)) - rss_hf |= ETH_RSS_NONFRAG_IPV6_OTHER; - if (flags & (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD)) - rss_hf |= ETH_RSS_L2_PAYLOAD; + int i; + for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) { + if (flags & adapter->pctypes_tbl[i]) + rss_hf |= (1ULL << i); + } return rss_hf; } @@ -6568,30 +7023,26 @@ static void i40e_pf_disable_rss(struct i40e_pf *pf) { struct i40e_hw *hw = I40E_PF_TO_HW(pf); - uint64_t hena; - hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)); - hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32; - if (hw->mac.type == I40E_MAC_X722) - hena &= ~I40E_RSS_HENA_ALL_X722; - else - hena &= ~I40E_RSS_HENA_ALL; - i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena); - i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32)); + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0); + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0); I40E_WRITE_FLUSH(hw); } -static int +int i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len) { struct i40e_pf *pf = I40E_VSI_TO_PF(vsi); struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + uint16_t key_idx = (vsi->type == I40E_VSI_SRIOV) ? 
+ I40E_VFQF_HKEY_MAX_INDEX : + I40E_PFQF_HKEY_MAX_INDEX; int ret = 0; if (!key || key_len == 0) { PMD_DRV_LOG(DEBUG, "No key to be configured"); return 0; - } else if (key_len != (I40E_PFQF_HKEY_MAX_INDEX + 1) * + } else if (key_len != (key_idx + 1) * sizeof(uint32_t)) { PMD_DRV_LOG(ERR, "Invalid key length %u", key_len); return -EINVAL; @@ -6608,8 +7059,18 @@ i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len) uint32_t *hash_key = (uint32_t *)key; uint16_t i; - for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) - i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), hash_key[i]); + if (vsi->type == I40E_VSI_SRIOV) { + for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) + I40E_WRITE_REG( + hw, + I40E_VFQF_HKEY1(i, vsi->user_param), + hash_key[i]); + + } else { + for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) + I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i), + hash_key[i]); + } I40E_WRITE_FLUSH(hw); } @@ -6621,6 +7082,7 @@ i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len) { struct i40e_pf *pf = I40E_VSI_TO_PF(vsi); struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + uint32_t reg; int ret; if (!key || !key_len) @@ -6637,11 +7099,22 @@ i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len) uint32_t *key_dw = (uint32_t *)key; uint16_t i; - for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) - key_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i)); + if (vsi->type == I40E_VSI_SRIOV) { + for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) { + reg = I40E_VFQF_HKEY1(i, vsi->user_param); + key_dw[i] = i40e_read_rx_ctl(hw, reg); + } + *key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) * + sizeof(uint32_t); + } else { + for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) { + reg = I40E_PFQF_HKEY(i); + key_dw[i] = i40e_read_rx_ctl(hw, reg); + } + *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) * + sizeof(uint32_t); + } } - *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t); - return 0; } @@ -6649,7 +7122,6 @@ static int i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf) { struct i40e_hw *hw = I40E_PF_TO_HW(pf); - uint64_t rss_hf; uint64_t hena; int ret; @@ -6658,14 +7130,7 @@ i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf) if (ret) return ret; - rss_hf = rss_conf->rss_hf; - hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)); - hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32; - if (hw->mac.type == I40E_MAC_X722) - hena &= ~I40E_RSS_HENA_ALL_X722; - else - hena &= ~I40E_RSS_HENA_ALL; - hena |= i40e_config_hena(rss_hf, hw->mac.type); + hena = i40e_config_hena(pf->adapter, rss_conf->rss_hf); i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena); i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32)); I40E_WRITE_FLUSH(hw); @@ -6679,14 +7144,13 @@ i40e_dev_rss_hash_update(struct rte_eth_dev *dev, { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL; + uint64_t rss_hf = rss_conf->rss_hf & pf->adapter->flow_types_mask; uint64_t hena; hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)); hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32; - if (!(hena & ((hw->mac.type == I40E_MAC_X722) - ? 
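i40e_dev_rss_hash_update() and the related helpers above now translate between flow-type bits and hardware pctype bits via the adapter's mapping table instead of fixed masks, and they refuse to enable RSS through this path if it was not enabled at configure time. An application-side sketch, assuming port 0 was configured with ETH_MQ_RX_RSS and leaving the programmed key unchanged:

	#include <rte_ethdev.h>

	static int
	example_set_rss_types(uint16_t port_id)
	{
		struct rte_eth_rss_conf rss_conf = {
			.rss_key = NULL,	/* keep the key already programmed */
			.rss_hf = ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP,
		};

		return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
	}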
I40E_RSS_HENA_ALL_X722 - : I40E_RSS_HENA_ALL))) { /* RSS disabled */ + + if (!(hena & pf->adapter->pctypes_mask)) { /* RSS disabled */ if (rss_hf != 0) /* Enable RSS */ return -EINVAL; return 0; /* Nothing to do */ @@ -6711,7 +7175,7 @@ i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev, hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)); hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32; - rss_conf->rss_hf = i40e_parse_hena(hena); + rss_conf->rss_hf = i40e_parse_hena(pf->adapter, hena); return 0; } @@ -6843,7 +7307,7 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf, uint8_t add) { uint16_t ip_type; - uint32_t ipv4_addr; + uint32_t ipv4_addr, ipv4_addr_le; uint8_t i, tun_type = 0; /* internal varialbe to convert ipv6 byte order */ uint32_t convert_ipv6[4]; @@ -6876,8 +7340,9 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf, if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) { ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4; ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr); + ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr); rte_memcpy(&pfilter->element.ipaddr.v4.data, - &rte_cpu_to_le_32(ipv4_addr), + &ipv4_addr_le, sizeof(pfilter->element.ipaddr.v4.data)); } else { ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6; @@ -6928,11 +7393,13 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf, node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input); if (add && node) { PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!"); + rte_free(cld_filter); return -EINVAL; } if (!add && !node) { PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!"); + rte_free(cld_filter); return -EINVAL; } @@ -6941,16 +7408,26 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf, vsi->seid, &cld_filter->element, 1); if (ret < 0) { PMD_DRV_LOG(ERR, "Failed to add a tunnel filter."); + rte_free(cld_filter); return -ENOTSUP; } tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0); + if (tunnel == NULL) { + PMD_DRV_LOG(ERR, "Failed to alloc memory."); + rte_free(cld_filter); + return -ENOMEM; + } + rte_memcpy(tunnel, &check_filter, sizeof(check_filter)); ret = i40e_sw_tunnel_filter_insert(pf, tunnel); + if (ret < 0) + rte_free(tunnel); } else { ret = i40e_aq_remove_cloud_filters(hw, vsi->seid, &cld_filter->element, 1); if (ret < 0) { PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter."); + rte_free(cld_filter); return -ENOTSUP; } ret = i40e_sw_tunnel_filter_del(pf, &node->input); @@ -6976,6 +7453,11 @@ i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf) struct i40e_hw *hw = I40E_PF_TO_HW(pf); enum i40e_status_code status = I40E_SUCCESS; + if (pf->support_multi_driver) { + PMD_DRV_LOG(ERR, "Replace l1 filter is not supported."); + return I40E_NOT_SUPPORTED; + } + memset(&filter_replace, 0, sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); memset(&filter_replace_buf, 0, @@ -6984,7 +7466,7 @@ i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf) /* create L1 filter */ filter_replace.old_filter_type = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC; - filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_TEID_MPLS; + filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11; filter_replace.tr_bit = 0; /* Prepare the buffer, 3 entries */ @@ -7012,6 +7494,13 @@ i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf) status = i40e_aq_replace_cloud_filters(hw, &filter_replace, &filter_replace_buf); + if (!status) { + i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER); + PMD_DRV_LOG(DEBUG, "Global configuration modification: " + "cloud l1 type is changed from 0x%x 
to 0x%x", + filter_replace.old_filter_type, + filter_replace.new_filter_type); + } return status; } @@ -7023,6 +7512,11 @@ i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf) struct i40e_hw *hw = I40E_PF_TO_HW(pf); enum i40e_status_code status = I40E_SUCCESS; + if (pf->support_multi_driver) { + PMD_DRV_LOG(ERR, "Replace cloud filter is not supported."); + return I40E_NOT_SUPPORTED; + } + /* For MPLSoUDP */ memset(&filter_replace, 0, sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); @@ -7032,18 +7526,22 @@ i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf) I40E_AQC_MIRROR_CLOUD_FILTER; filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP; filter_replace.new_filter_type = - I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP; + I40E_AQC_ADD_CLOUD_FILTER_0X11; /* Prepare the buffer, 2 entries */ filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG; filter_replace_buf.data[0] |= I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; - filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_TEID_MPLS; + filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11; filter_replace_buf.data[4] |= I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; status = i40e_aq_replace_cloud_filters(hw, &filter_replace, &filter_replace_buf); if (status < 0) return status; + PMD_DRV_LOG(DEBUG, "Global configuration modification: " + "cloud filter type is changed from 0x%x to 0x%x", + filter_replace.old_filter_type, + filter_replace.new_filter_type); /* For MPLSoGRE */ memset(&filter_replace, 0, @@ -7055,64 +7553,223 @@ i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf) I40E_AQC_MIRROR_CLOUD_FILTER; filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC; filter_replace.new_filter_type = - I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE; + I40E_AQC_ADD_CLOUD_FILTER_0X12; /* Prepare the buffer, 2 entries */ filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG; filter_replace_buf.data[0] |= I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; - filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_TEID_MPLS; + filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11; filter_replace_buf.data[4] |= I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; status = i40e_aq_replace_cloud_filters(hw, &filter_replace, &filter_replace_buf); + if (!status) { + i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER); + PMD_DRV_LOG(DEBUG, "Global configuration modification: " + "cloud filter type is changed from 0x%x to 0x%x", + filter_replace.old_filter_type, + filter_replace.new_filter_type); + } return status; } -int -i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf, - struct i40e_tunnel_filter_conf *tunnel_filter, - uint8_t add) +static enum i40e_status_code +i40e_replace_gtp_l1_filter(struct i40e_pf *pf) { - uint16_t ip_type; - uint32_t ipv4_addr; - uint8_t i, tun_type = 0; - /* internal variable to convert ipv6 byte order */ - uint32_t convert_ipv6[4]; - int val, ret = 0; - struct i40e_pf_vf *vf = NULL; + struct i40e_aqc_replace_cloud_filters_cmd filter_replace; + struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf; struct i40e_hw *hw = I40E_PF_TO_HW(pf); - struct i40e_vsi *vsi; - struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter; - struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter; - struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel; - struct i40e_tunnel_filter *tunnel, *node; - struct i40e_tunnel_filter check_filter; /* Check if filter exists */ - uint32_t teid_le; - bool big_buffer = 0; - - cld_filter = rte_zmalloc("tunnel_filter", - sizeof(struct 
i40e_aqc_add_rm_cloud_filt_elem_ext), - 0); + enum i40e_status_code status = I40E_SUCCESS; - if (cld_filter == NULL) { - PMD_DRV_LOG(ERR, "Failed to alloc memory."); - return -ENOMEM; + if (pf->support_multi_driver) { + PMD_DRV_LOG(ERR, "Replace l1 filter is not supported."); + return I40E_NOT_SUPPORTED; } - pfilter = cld_filter; - ether_addr_copy(&tunnel_filter->outer_mac, - (struct ether_addr *)&pfilter->element.outer_mac); - ether_addr_copy(&tunnel_filter->inner_mac, - (struct ether_addr *)&pfilter->element.inner_mac); + /* For GTP-C */ + memset(&filter_replace, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); + memset(&filter_replace_buf, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf)); + /* create L1 filter */ + filter_replace.old_filter_type = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC; + filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X12; + filter_replace.tr_bit = I40E_AQC_NEW_TR_22 | + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + /* Prepare the buffer, 2 entries */ + filter_replace_buf.data[0] = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0; + filter_replace_buf.data[0] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[2] = 0xFF; + filter_replace_buf.data[3] = 0xFF; + filter_replace_buf.data[4] = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1; + filter_replace_buf.data[4] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[6] = 0xFF; + filter_replace_buf.data[7] = 0xFF; + status = i40e_aq_replace_cloud_filters(hw, &filter_replace, + &filter_replace_buf); + if (status < 0) + return status; + PMD_DRV_LOG(DEBUG, "Global configuration modification: " + "cloud l1 type is changed from 0x%x to 0x%x", + filter_replace.old_filter_type, + filter_replace.new_filter_type); - pfilter->element.inner_vlan = - rte_cpu_to_le_16(tunnel_filter->inner_vlan); - if (tunnel_filter->ip_type == I40E_TUNNEL_IPTYPE_IPV4) { - ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4; + /* for GTP-U */ + memset(&filter_replace, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); + memset(&filter_replace_buf, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf)); + /* create L1 filter */ + filter_replace.old_filter_type = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY; + filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X13; + filter_replace.tr_bit = I40E_AQC_NEW_TR_21 | + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + /* Prepare the buffer, 2 entries */ + filter_replace_buf.data[0] = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0; + filter_replace_buf.data[0] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[2] = 0xFF; + filter_replace_buf.data[3] = 0xFF; + filter_replace_buf.data[4] = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1; + filter_replace_buf.data[4] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[6] = 0xFF; + filter_replace_buf.data[7] = 0xFF; + + status = i40e_aq_replace_cloud_filters(hw, &filter_replace, + &filter_replace_buf); + if (!status) { + i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER); + PMD_DRV_LOG(DEBUG, "Global configuration modification: " + "cloud l1 type is changed from 0x%x to 0x%x", + filter_replace.old_filter_type, + filter_replace.new_filter_type); + } + return status; +} + +static enum +i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf) +{ + struct i40e_aqc_replace_cloud_filters_cmd filter_replace; + struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf; + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + enum 
i40e_status_code status = I40E_SUCCESS; + + if (pf->support_multi_driver) { + PMD_DRV_LOG(ERR, "Replace cloud filter is not supported."); + return I40E_NOT_SUPPORTED; + } + + /* for GTP-C */ + memset(&filter_replace, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); + memset(&filter_replace_buf, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf)); + filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER; + filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN; + filter_replace.new_filter_type = + I40E_AQC_ADD_CLOUD_FILTER_0X11; + /* Prepare the buffer, 2 entries */ + filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X12; + filter_replace_buf.data[0] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG; + filter_replace_buf.data[4] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + status = i40e_aq_replace_cloud_filters(hw, &filter_replace, + &filter_replace_buf); + if (status < 0) + return status; + PMD_DRV_LOG(DEBUG, "Global configuration modification: " + "cloud filter type is changed from 0x%x to 0x%x", + filter_replace.old_filter_type, + filter_replace.new_filter_type); + + /* for GTP-U */ + memset(&filter_replace, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); + memset(&filter_replace_buf, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf)); + filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER; + filter_replace.old_filter_type = + I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID; + filter_replace.new_filter_type = + I40E_AQC_ADD_CLOUD_FILTER_0X12; + /* Prepare the buffer, 2 entries */ + filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X13; + filter_replace_buf.data[0] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG; + filter_replace_buf.data[4] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + + status = i40e_aq_replace_cloud_filters(hw, &filter_replace, + &filter_replace_buf); + if (!status) { + i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER); + PMD_DRV_LOG(DEBUG, "Global configuration modification: " + "cloud filter type is changed from 0x%x to 0x%x", + filter_replace.old_filter_type, + filter_replace.new_filter_type); + } + return status; +} + +int +i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf, + struct i40e_tunnel_filter_conf *tunnel_filter, + uint8_t add) +{ + uint16_t ip_type; + uint32_t ipv4_addr, ipv4_addr_le; + uint8_t i, tun_type = 0; + /* internal variable to convert ipv6 byte order */ + uint32_t convert_ipv6[4]; + int val, ret = 0; + struct i40e_pf_vf *vf = NULL; + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct i40e_vsi *vsi; + struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter; + struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter; + struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel; + struct i40e_tunnel_filter *tunnel, *node; + struct i40e_tunnel_filter check_filter; /* Check if filter exists */ + uint32_t teid_le; + bool big_buffer = 0; + + cld_filter = rte_zmalloc("tunnel_filter", + sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext), + 0); + + if (cld_filter == NULL) { + PMD_DRV_LOG(ERR, "Failed to alloc memory."); + return -ENOMEM; + } + pfilter = cld_filter; + + ether_addr_copy(&tunnel_filter->outer_mac, + (struct ether_addr *)&pfilter->element.outer_mac); + ether_addr_copy(&tunnel_filter->inner_mac, + (struct ether_addr *)&pfilter->element.inner_mac); + + pfilter->element.inner_vlan = + rte_cpu_to_le_16(tunnel_filter->inner_vlan); + if 
(tunnel_filter->ip_type == I40E_TUNNEL_IPTYPE_IPV4) { + ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4; ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr); + ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr); rte_memcpy(&pfilter->element.ipaddr.v4.data, - &rte_cpu_to_le_32(ipv4_addr), + &ipv4_addr_le, sizeof(pfilter->element.ipaddr.v4.data)); } else { ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6; @@ -7151,7 +7808,7 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf, pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] = 0x40; big_buffer = 1; - tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoUDP; + tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP; break; case I40E_TUNNEL_TYPE_MPLSoGRE: if (!pf->mpls_replace_flag) { @@ -7167,7 +7824,37 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf, pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] = 0x0; big_buffer = 1; - tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoGRE; + tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE; + break; + case I40E_TUNNEL_TYPE_GTPC: + if (!pf->gtp_replace_flag) { + i40e_replace_gtp_l1_filter(pf); + i40e_replace_gtp_cloud_filter(pf); + pf->gtp_replace_flag = 1; + } + teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id); + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0] = + (teid_le >> 16) & 0xFFFF; + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1] = + teid_le & 0xFFFF; + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2] = + 0x0; + big_buffer = 1; + break; + case I40E_TUNNEL_TYPE_GTPU: + if (!pf->gtp_replace_flag) { + i40e_replace_gtp_l1_filter(pf); + i40e_replace_gtp_cloud_filter(pf); + pf->gtp_replace_flag = 1; + } + teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id); + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0] = + (teid_le >> 16) & 0xFFFF; + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1] = + teid_le & 0xFFFF; + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2] = + 0x0; + big_buffer = 1; break; case I40E_TUNNEL_TYPE_QINQ: if (!pf->qinq_replace_flag) { @@ -7195,13 +7882,19 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf, if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP) pfilter->element.flags = - I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP; + I40E_AQC_ADD_CLOUD_FILTER_0X11; else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE) pfilter->element.flags = - I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE; + I40E_AQC_ADD_CLOUD_FILTER_0X12; + else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPC) + pfilter->element.flags = + I40E_AQC_ADD_CLOUD_FILTER_0X11; + else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPU) + pfilter->element.flags = + I40E_AQC_ADD_CLOUD_FILTER_0X12; else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ) pfilter->element.flags |= - I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ; + I40E_AQC_ADD_CLOUD_FILTER_0X10; else { val = i40e_dev_get_filter_type(tunnel_filter->filter_type, &pfilter->element.flags); @@ -7223,6 +7916,7 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf, else { if (tunnel_filter->vf_id >= pf->vf_num) { PMD_DRV_LOG(ERR, "Invalid argument."); + rte_free(cld_filter); return -EINVAL; } vf = &pf->vfs[tunnel_filter->vf_id]; @@ -7237,11 +7931,13 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf, node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input); if (add && node) { PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!"); + rte_free(cld_filter); return -EINVAL; } if (!add && !node) { PMD_DRV_LOG(ERR, "There's no 
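In the new GTP-C and GTP-U cases above, the 32-bit tunnel endpoint identifier is carried in two 16-bit general-field words of the cloud filter. A stripped-down restatement of that packing, with a hypothetical caller-provided teid:

	#include <stdint.h>
	#include <rte_byteorder.h>

	static void
	example_split_teid(uint32_t teid, uint16_t *word0, uint16_t *word1)
	{
		uint32_t teid_le = rte_cpu_to_le_32(teid);

		*word0 = (teid_le >> 16) & 0xFFFF;	/* ..._FV_FLU_0X12/0X13_WORD0 */
		*word1 = teid_le & 0xFFFF;		/* ..._FV_FLU_0X12/0X13_WORD1 */
	}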
corresponding tunnel filter!"); + rte_free(cld_filter); return -EINVAL; } @@ -7254,11 +7950,20 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf, vsi->seid, &cld_filter->element, 1); if (ret < 0) { PMD_DRV_LOG(ERR, "Failed to add a tunnel filter."); + rte_free(cld_filter); return -ENOTSUP; } tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0); + if (tunnel == NULL) { + PMD_DRV_LOG(ERR, "Failed to alloc memory."); + rte_free(cld_filter); + return -ENOMEM; + } + rte_memcpy(tunnel, &check_filter, sizeof(check_filter)); ret = i40e_sw_tunnel_filter_insert(pf, tunnel); + if (ret < 0) + rte_free(tunnel); } else { if (big_buffer) ret = i40e_aq_remove_cloud_filters_big_buffer( @@ -7268,6 +7973,7 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf, &cld_filter->element, 1); if (ret < 0) { PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter."); + rte_free(cld_filter); return -ENOTSUP; } ret = i40e_sw_tunnel_filter_del(pf, &node->input); @@ -7460,7 +8166,7 @@ i40e_pf_config_rss(struct i40e_pf *pf) /* * If both VMDQ and RSS enabled, not all of PF queues are configured. - * It's necessary to calulate the actual PF queues that are configured. + * It's necessary to calculate the actual PF queues that are configured. */ if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) num = i40e_pf_calc_configured_queues_num(pf); @@ -7486,7 +8192,7 @@ i40e_pf_config_rss(struct i40e_pf *pf) } rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf; - if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) { + if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) { i40e_pf_disable_rss(pf); return 0; } @@ -7545,9 +8251,15 @@ i40e_tunnel_filter_param_check(struct i40e_pf *pf, static int i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len) { + struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf; uint32_t val, reg; int ret = -EINVAL; + if (pf->support_multi_driver) { + PMD_DRV_LOG(ERR, "GRE key length configuration is unsupported"); + return -ENOTSUP; + } + val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)); PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val); @@ -7565,6 +8277,10 @@ i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len) reg, NULL); if (ret != 0) return ret; + PMD_DRV_LOG(DEBUG, "Global register 0x%08x is changed " + "with value 0x%08x", + I40E_GL_PRS_FVBM(2), reg); + i40e_global_cfg_warning(I40E_WARNING_GRE_KEY_LEN); } else { ret = 0; } @@ -7707,9 +8423,9 @@ static int i40e_get_hash_filter_global_config(struct i40e_hw *hw, struct rte_eth_hash_global_conf *g_cfg) { - uint32_t reg, mask = I40E_FLOW_TYPES; - uint16_t i; - enum i40e_filter_pctype pctype; + struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back; + uint32_t reg; + uint16_t i, j; memset(g_cfg, 0, sizeof(*g_cfg)); reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL); @@ -7720,29 +8436,41 @@ i40e_get_hash_filter_global_config(struct i40e_hw *hw, PMD_DRV_LOG(DEBUG, "Hash function is %s", (reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR"); - for (i = 0; mask && i < RTE_ETH_FLOW_MAX; i++) { - if (!(mask & (1UL << i))) - continue; - mask &= ~(1UL << i); - /* Bit set indicats the coresponding flow type is supported */ - g_cfg->valid_bit_mask[0] |= (1UL << i); - /* if flowtype is invalid, continue */ - if (!I40E_VALID_FLOW(i)) + /* + * As i40e supports less than 64 flow types, only first 64 bits need to + * be checked. 
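i40e_dev_set_gre_key_len() above now rejects the change when several drivers share the NIC, since GL_PRS_FVBM is a device-global register, and logs the modification otherwise. The sketch below assumes applications reach it through the legacy filter_ctrl global-config path; port 0 and the 4-byte key length are examples only:

	#include <string.h>
	#include <rte_ethdev.h>
	#include <rte_eth_ctrl.h>

	static int
	example_set_gre_key_len(uint16_t port_id)
	{
		struct rte_eth_global_cfg cfg;

		memset(&cfg, 0, sizeof(cfg));
		cfg.cfg_type = RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN;
		cfg.cfg.gre_key_len = 4;	/* example key length in bytes */

		return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NONE,
					       RTE_ETH_FILTER_SET, &cfg);
	}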
+ */ + for (i = 1; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) { + g_cfg->valid_bit_mask[i] = 0ULL; + g_cfg->sym_hash_enable_mask[i] = 0ULL; + } + + g_cfg->valid_bit_mask[0] = adapter->flow_types_mask; + + for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) { + if (!adapter->pctypes_tbl[i]) continue; - pctype = i40e_flowtype_to_pctype(i); - reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(pctype)); - if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK) - g_cfg->sym_hash_enable_mask[0] |= (1UL << i); + for (j = I40E_FILTER_PCTYPE_INVALID + 1; + j < I40E_FILTER_PCTYPE_MAX; j++) { + if (adapter->pctypes_tbl[i] & (1ULL << j)) { + reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(j)); + if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK) { + g_cfg->sym_hash_enable_mask[0] |= + (1ULL << i); + } + } + } } return 0; } static int -i40e_hash_global_config_check(struct rte_eth_hash_global_conf *g_cfg) +i40e_hash_global_config_check(const struct i40e_adapter *adapter, + const struct rte_eth_hash_global_conf *g_cfg) { uint32_t i; - uint32_t mask0, i40e_mask = I40E_FLOW_TYPES; + uint64_t mask0, i40e_mask = adapter->flow_types_mask; if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ && g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR && @@ -7753,7 +8481,7 @@ i40e_hash_global_config_check(struct rte_eth_hash_global_conf *g_cfg) } /* - * As i40e supports less than 32 flow types, only first 32 bits need to + * As i40e supports less than 64 flow types, only first 64 bits need to * be checked. */ mask0 = g_cfg->valid_bit_mask[0]; @@ -7785,64 +8513,40 @@ static int i40e_set_hash_filter_global_config(struct i40e_hw *hw, struct rte_eth_hash_global_conf *g_cfg) { + struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back; + struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf; int ret; - uint16_t i; + uint16_t i, j; uint32_t reg; - uint32_t mask0 = g_cfg->valid_bit_mask[0]; - enum i40e_filter_pctype pctype; + uint64_t mask0 = g_cfg->valid_bit_mask[0] & adapter->flow_types_mask; + + if (pf->support_multi_driver) { + PMD_DRV_LOG(ERR, "Hash global configuration is not supported."); + return -ENOTSUP; + } /* Check the input parameters */ - ret = i40e_hash_global_config_check(g_cfg); + ret = i40e_hash_global_config_check(adapter, g_cfg); if (ret < 0) return ret; - for (i = 0; mask0 && i < UINT32_BIT; i++) { - if (!(mask0 & (1UL << i))) - continue; - mask0 &= ~(1UL << i); - /* if flowtype is invalid, continue */ - if (!I40E_VALID_FLOW(i)) - continue; - pctype = i40e_flowtype_to_pctype(i); - reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ? 
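The rewritten i40e_get_hash_filter_global_config() above reports validity and symmetric-hash enablement per flow type by walking the adapter's flow-type-to-pctype table, and the SET counterpart refuses to run when multiple drivers share the device. A hedged application sketch of the SET side, enabling symmetric Toeplitz hashing for IPv4/TCP on example port 0:

	#include <string.h>
	#include <rte_ethdev.h>
	#include <rte_eth_ctrl.h>

	static int
	example_enable_sym_hash(uint16_t port_id)
	{
		struct rte_eth_hash_filter_info info;

		memset(&info, 0, sizeof(info));
		info.info_type = RTE_ETH_HASH_FILTER_GLOBAL_CONFIG;
		info.info.global_conf.hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
		/* only touch IPv4/TCP, and make its hash symmetric */
		info.info.global_conf.valid_bit_mask[0] =
				1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
		info.info.global_conf.sym_hash_enable_mask[0] =
				1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;

		return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
					       RTE_ETH_FILTER_SET, &info);
	}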
- I40E_GLQF_HSYM_SYMH_ENA_MASK : 0; - if (hw->mac.type == I40E_MAC_X722) { - if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP) { - i40e_write_rx_ctl(hw, I40E_GLQF_HSYM( - I40E_FILTER_PCTYPE_NONF_IPV4_UDP), reg); - i40e_write_rx_ctl(hw, I40E_GLQF_HSYM( - I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP), - reg); - i40e_write_rx_ctl(hw, I40E_GLQF_HSYM( - I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP), - reg); - } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) { - i40e_write_rx_ctl(hw, I40E_GLQF_HSYM( - I40E_FILTER_PCTYPE_NONF_IPV4_TCP), reg); - i40e_write_rx_ctl(hw, I40E_GLQF_HSYM( - I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK), - reg); - } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP) { - i40e_write_rx_ctl(hw, I40E_GLQF_HSYM( - I40E_FILTER_PCTYPE_NONF_IPV6_UDP), reg); - i40e_write_rx_ctl(hw, I40E_GLQF_HSYM( - I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP), - reg); - i40e_write_rx_ctl(hw, I40E_GLQF_HSYM( - I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP), - reg); - } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) { - i40e_write_rx_ctl(hw, I40E_GLQF_HSYM( - I40E_FILTER_PCTYPE_NONF_IPV6_TCP), reg); - i40e_write_rx_ctl(hw, I40E_GLQF_HSYM( - I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK), - reg); - } else { - i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype), - reg); + /* + * As i40e supports less than 64 flow types, only first 64 bits need to + * be configured. + */ + for (i = RTE_ETH_FLOW_UNKNOWN + 1; mask0 && i < UINT64_BIT; i++) { + if (mask0 & (1UL << i)) { + reg = (g_cfg->sym_hash_enable_mask[0] & (1ULL << i)) ? + I40E_GLQF_HSYM_SYMH_ENA_MASK : 0; + + for (j = I40E_FILTER_PCTYPE_INVALID + 1; + j < I40E_FILTER_PCTYPE_MAX; j++) { + if (adapter->pctypes_tbl[i] & (1ULL << j)) + i40e_write_global_rx_ctl(hw, + I40E_GLQF_HSYM(j), + reg); } - } else { - i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype), reg); + i40e_global_cfg_warning(I40E_WARNING_HSYM); } } @@ -7867,7 +8571,8 @@ i40e_set_hash_filter_global_config(struct i40e_hw *hw, /* Use the default, and keep it as it is */ goto out; - i40e_write_rx_ctl(hw, I40E_GLQF_CTL, reg); + i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg); + i40e_global_cfg_warning(I40E_WARNING_QF_CTL); out: I40E_WRITE_FLUSH(hw); @@ -8136,7 +8841,7 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype, /** * Validate if the input set is allowed for a specific PCTYPE */ -static int +int i40e_validate_input_set(enum i40e_filter_pctype pctype, enum rte_filter_type filter, uint64_t inset) { @@ -8311,7 +9016,7 @@ i40e_parse_input_set(uint64_t *inset, * Translate the input set from bit masks to register aware bit masks * and vice versa */ -static uint64_t +uint64_t i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input) { uint64_t val = 0; @@ -8396,7 +9101,7 @@ i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input) return val; } -static int +int i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem) { uint8_t i, idx = 0; @@ -8444,7 +9149,7 @@ i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem) return idx; } -static void +void i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val) { uint32_t reg = i40e_read_rx_ctl(hw, addr); @@ -8456,6 +9161,18 @@ i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val) (uint32_t)i40e_read_rx_ctl(hw, addr)); } +void +i40e_check_write_global_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val) +{ + uint32_t reg = i40e_read_rx_ctl(hw, addr); + + PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg); + if (reg != val) + i40e_write_global_rx_ctl(hw, 
addr, val); + PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr, + (uint32_t)i40e_read_rx_ctl(hw, addr)); +} + static void i40e_filter_input_set_init(struct i40e_pf *pf) { @@ -8464,16 +9181,14 @@ i40e_filter_input_set_init(struct i40e_pf *pf) uint64_t input_set, inset_reg; uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0}; int num, i; + uint16_t flow_type; for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP; pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) { - if (hw->mac.type == I40E_MAC_X722) { - if (!I40E_VALID_PCTYPE_X722(pctype)) - continue; - } else { - if (!I40E_VALID_PCTYPE(pctype)) - continue; - } + flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype); + + if (flow_type == RTE_ETH_FLOW_UNKNOWN) + continue; input_set = i40e_get_default_input_set(pctype); @@ -8481,6 +9196,10 @@ i40e_filter_input_set_init(struct i40e_pf *pf) I40E_INSET_MASK_NUM_REG); if (num < 0) return; + if (pf->support_multi_driver && num > 0) { + PMD_DRV_LOG(ERR, "Input set setting is not supported."); + return; + } inset_reg = i40e_translate_input_set_reg(hw->mac.type, input_set); @@ -8489,31 +9208,48 @@ i40e_filter_input_set_init(struct i40e_pf *pf) i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1), (uint32_t)((inset_reg >> I40E_32_BIT_WIDTH) & UINT32_MAX)); - i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype), - (uint32_t)(inset_reg & UINT32_MAX)); - i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype), - (uint32_t)((inset_reg >> - I40E_32_BIT_WIDTH) & UINT32_MAX)); - - for (i = 0; i < num; i++) { - i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), - mask_reg[i]); - i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype), - mask_reg[i]); - } - /*clear unused mask registers of the pctype */ - for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) { - i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), - 0); - i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype), - 0); + if (!pf->support_multi_driver) { + i40e_check_write_global_reg(hw, + I40E_GLQF_HASH_INSET(0, pctype), + (uint32_t)(inset_reg & UINT32_MAX)); + i40e_check_write_global_reg(hw, + I40E_GLQF_HASH_INSET(1, pctype), + (uint32_t)((inset_reg >> + I40E_32_BIT_WIDTH) & UINT32_MAX)); + + for (i = 0; i < num; i++) { + i40e_check_write_global_reg(hw, + I40E_GLQF_FD_MSK(i, pctype), + mask_reg[i]); + i40e_check_write_global_reg(hw, + I40E_GLQF_HASH_MSK(i, pctype), + mask_reg[i]); + } + /*clear unused mask registers of the pctype */ + for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) { + i40e_check_write_global_reg(hw, + I40E_GLQF_FD_MSK(i, pctype), + 0); + i40e_check_write_global_reg(hw, + I40E_GLQF_HASH_MSK(i, pctype), + 0); + } + } else { + PMD_DRV_LOG(ERR, "Input set setting is not supported."); } I40E_WRITE_FLUSH(hw); /* store the default input set */ - pf->hash_input_set[pctype] = input_set; + if (!pf->support_multi_driver) + pf->hash_input_set[pctype] = input_set; pf->fdir.input_set[pctype] = input_set; } + + if (!pf->support_multi_driver) { + i40e_global_cfg_warning(I40E_WARNING_HASH_INSET); + i40e_global_cfg_warning(I40E_WARNING_FD_MSK); + i40e_global_cfg_warning(I40E_WARNING_HASH_MSK); + } } int @@ -8536,7 +9272,13 @@ i40e_hash_filter_inset_select(struct i40e_hw *hw, return -EINVAL; } - if (!I40E_VALID_FLOW(conf->flow_type)) { + if (pf->support_multi_driver) { + PMD_DRV_LOG(ERR, "Hash input set setting is not supported."); + return -ENOTSUP; + } + + pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type); + if (pctype == I40E_FILTER_PCTYPE_INVALID) { PMD_DRV_LOG(ERR, "invalid flow_type input."); return -EINVAL; } @@ -8544,10 
+9286,8 @@ i40e_hash_filter_inset_select(struct i40e_hw *hw, if (hw->mac.type == I40E_MAC_X722) { /* get translated pctype value in fd pctype register */ pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(hw, - I40E_GLQF_FD_PCTYPES((int)i40e_flowtype_to_pctype( - conf->flow_type))); - } else - pctype = i40e_flowtype_to_pctype(conf->flow_type); + I40E_GLQF_FD_PCTYPES((int)pctype)); + } ret = i40e_parse_input_set(&input_set, pctype, conf->field, conf->inset_size); @@ -8555,11 +9295,7 @@ i40e_hash_filter_inset_select(struct i40e_hw *hw, PMD_DRV_LOG(ERR, "Failed to parse input set"); return -EINVAL; } - if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_HASH, - input_set) != 0) { - PMD_DRV_LOG(ERR, "Invalid input set"); - return -EINVAL; - } + if (conf->op == RTE_ETH_INPUT_SET_ADD) { /* get inset value in register */ inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype)); @@ -8574,19 +9310,21 @@ i40e_hash_filter_inset_select(struct i40e_hw *hw, inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set); - i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype), - (uint32_t)(inset_reg & UINT32_MAX)); - i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype), - (uint32_t)((inset_reg >> - I40E_32_BIT_WIDTH) & UINT32_MAX)); + i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype), + (uint32_t)(inset_reg & UINT32_MAX)); + i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype), + (uint32_t)((inset_reg >> + I40E_32_BIT_WIDTH) & UINT32_MAX)); + i40e_global_cfg_warning(I40E_WARNING_HASH_INSET); for (i = 0; i < num; i++) - i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype), - mask_reg[i]); + i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype), + mask_reg[i]); /*clear unused mask registers of the pctype */ for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) - i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype), - 0); + i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype), + 0); + i40e_global_cfg_warning(I40E_WARNING_HASH_MSK); I40E_WRITE_FLUSH(hw); pf->hash_input_set[pctype] = input_set; @@ -8613,24 +9351,19 @@ i40e_fdir_filter_inset_select(struct i40e_pf *pf, return -EINVAL; } - if (!I40E_VALID_FLOW(conf->flow_type)) { + pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type); + + if (pctype == I40E_FILTER_PCTYPE_INVALID) { PMD_DRV_LOG(ERR, "invalid flow_type input."); return -EINVAL; } - pctype = i40e_flowtype_to_pctype(conf->flow_type); - ret = i40e_parse_input_set(&input_set, pctype, conf->field, conf->inset_size); if (ret) { PMD_DRV_LOG(ERR, "Failed to parse input set"); return -EINVAL; } - if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_FDIR, - input_set) != 0) { - PMD_DRV_LOG(ERR, "Invalid input set"); - return -EINVAL; - } /* get inset value in register */ inset_reg = i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1)); @@ -8649,6 +9382,10 @@ i40e_fdir_filter_inset_select(struct i40e_pf *pf, I40E_INSET_MASK_NUM_REG); if (num < 0) return -EINVAL; + if (pf->support_multi_driver && num > 0) { + PMD_DRV_LOG(ERR, "FDIR bit mask is not supported."); + return -ENOTSUP; + } inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set); @@ -8658,13 +9395,20 @@ i40e_fdir_filter_inset_select(struct i40e_pf *pf, (uint32_t)((inset_reg >> I40E_32_BIT_WIDTH) & UINT32_MAX)); - for (i = 0; i < num; i++) - i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), - mask_reg[i]); - /*clear unused mask registers of the pctype */ - for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) - i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), - 0); + if 
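i40e_hash_filter_inset_select() above now derives the pctype through the adapter table, drops the redundant input-set validation, writes the inset and mask registers through the global-register helpers, and bails out with -ENOTSUP when support_multi_driver is set. A sketch of how an application would select a hash input set through the same legacy filter API (port, flow type and fields are examples):

	#include <string.h>
	#include <rte_ethdev.h>
	#include <rte_eth_ctrl.h>

	static int
	example_select_hash_input_set(uint16_t port_id)
	{
		struct rte_eth_hash_filter_info info;

		memset(&info, 0, sizeof(info));
		info.info_type = RTE_ETH_HASH_FILTER_INPUT_SET_SELECT;
		info.info.input_set_conf.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
		info.info.input_set_conf.field[0] = RTE_ETH_INPUT_SET_L3_SRC_IP4;
		info.info.input_set_conf.field[1] = RTE_ETH_INPUT_SET_L3_DST_IP4;
		info.info.input_set_conf.inset_size = 2;
		info.info.input_set_conf.op = RTE_ETH_INPUT_SET_SELECT;

		return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
					       RTE_ETH_FILTER_SET, &info);
	}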
(!pf->support_multi_driver) { + for (i = 0; i < num; i++) + i40e_check_write_global_reg(hw, + I40E_GLQF_FD_MSK(i, pctype), + mask_reg[i]); + /*clear unused mask registers of the pctype */ + for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) + i40e_check_write_global_reg(hw, + I40E_GLQF_FD_MSK(i, pctype), + 0); + i40e_global_cfg_warning(I40E_WARNING_FD_MSK); + } else { + PMD_DRV_LOG(ERR, "FDIR bit mask is not supported."); + } I40E_WRITE_FLUSH(hw); pf->fdir.input_set[pctype] = input_set; @@ -8910,9 +9654,16 @@ i40e_ethertype_filter_set(struct i40e_pf *pf, if (add) { ethertype_filter = rte_zmalloc("ethertype_filter", sizeof(*ethertype_filter), 0); + if (ethertype_filter == NULL) { + PMD_DRV_LOG(ERR, "Failed to alloc memory."); + return -ENOMEM; + } + rte_memcpy(ethertype_filter, &check_filter, sizeof(check_filter)); ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter); + if (ret < 0) + rte_free(ethertype_filter); } else { ret = i40e_sw_ethertype_filter_del(pf, &node->input); } @@ -9069,72 +9820,42 @@ i40e_hw_init(struct rte_eth_dev *dev) i40e_set_symmetric_hash_enable_per_port(hw, 0); } +/* + * For X722 it is possible to have multiple pctypes mapped to the same flowtype + * however this function will return only one highest pctype index, + * which is not quite correct. This is known problem of i40e driver + * and needs to be fixed later. + */ enum i40e_filter_pctype -i40e_flowtype_to_pctype(uint16_t flow_type) -{ - static const enum i40e_filter_pctype pctype_table[] = { - [RTE_ETH_FLOW_FRAG_IPV4] = I40E_FILTER_PCTYPE_FRAG_IPV4, - [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = - I40E_FILTER_PCTYPE_NONF_IPV4_UDP, - [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = - I40E_FILTER_PCTYPE_NONF_IPV4_TCP, - [RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] = - I40E_FILTER_PCTYPE_NONF_IPV4_SCTP, - [RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] = - I40E_FILTER_PCTYPE_NONF_IPV4_OTHER, - [RTE_ETH_FLOW_FRAG_IPV6] = I40E_FILTER_PCTYPE_FRAG_IPV6, - [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = - I40E_FILTER_PCTYPE_NONF_IPV6_UDP, - [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = - I40E_FILTER_PCTYPE_NONF_IPV6_TCP, - [RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] = - I40E_FILTER_PCTYPE_NONF_IPV6_SCTP, - [RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] = - I40E_FILTER_PCTYPE_NONF_IPV6_OTHER, - [RTE_ETH_FLOW_L2_PAYLOAD] = I40E_FILTER_PCTYPE_L2_PAYLOAD, - }; +i40e_flowtype_to_pctype(const struct i40e_adapter *adapter, uint16_t flow_type) +{ + int i; + uint64_t pctype_mask; - return pctype_table[flow_type]; + if (flow_type < I40E_FLOW_TYPE_MAX) { + pctype_mask = adapter->pctypes_tbl[flow_type]; + for (i = I40E_FILTER_PCTYPE_MAX - 1; i > 0; i--) { + if (pctype_mask & (1ULL << i)) + return (enum i40e_filter_pctype)i; + } + } + return I40E_FILTER_PCTYPE_INVALID; } uint16_t -i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype) +i40e_pctype_to_flowtype(const struct i40e_adapter *adapter, + enum i40e_filter_pctype pctype) { - static const uint16_t flowtype_table[] = { - [I40E_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_FLOW_FRAG_IPV4, - [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = - RTE_ETH_FLOW_NONFRAG_IPV4_UDP, - [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] = - RTE_ETH_FLOW_NONFRAG_IPV4_UDP, - [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] = - RTE_ETH_FLOW_NONFRAG_IPV4_UDP, - [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = - RTE_ETH_FLOW_NONFRAG_IPV4_TCP, - [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] = - RTE_ETH_FLOW_NONFRAG_IPV4_TCP, - [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = - RTE_ETH_FLOW_NONFRAG_IPV4_SCTP, - [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] = - RTE_ETH_FLOW_NONFRAG_IPV4_OTHER, - [I40E_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_FLOW_FRAG_IPV6, - 
[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = - RTE_ETH_FLOW_NONFRAG_IPV6_UDP, - [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] = - RTE_ETH_FLOW_NONFRAG_IPV6_UDP, - [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] = - RTE_ETH_FLOW_NONFRAG_IPV6_UDP, - [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = - RTE_ETH_FLOW_NONFRAG_IPV6_TCP, - [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] = - RTE_ETH_FLOW_NONFRAG_IPV6_TCP, - [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = - RTE_ETH_FLOW_NONFRAG_IPV6_SCTP, - [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] = - RTE_ETH_FLOW_NONFRAG_IPV6_OTHER, - [I40E_FILTER_PCTYPE_L2_PAYLOAD] = RTE_ETH_FLOW_L2_PAYLOAD, - }; + uint16_t flowtype; + uint64_t pctype_mask = 1ULL << pctype; + + for (flowtype = RTE_ETH_FLOW_UNKNOWN + 1; flowtype < I40E_FLOW_TYPE_MAX; + flowtype++) { + if (adapter->pctypes_tbl[flowtype] & pctype_mask) + return flowtype; + } - return flowtype_table[pctype]; + return RTE_ETH_FLOW_UNKNOWN; } /* @@ -9150,8 +9871,9 @@ i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype) */ /* For both X710 and XL710 */ -#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x10000200 -#define I40E_GL_SWR_PRI_JOIN_MAP_0 0x26CE00 +#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1 0x10000200 +#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2 0x203F0200 +#define I40E_GL_SWR_PRI_JOIN_MAP_0 0x26CE00 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200 #define I40E_GL_SWR_PRI_JOIN_MAP_2 0x26CE08 @@ -9172,13 +9894,22 @@ i40e_dev_sync_phy_type(struct i40e_hw *hw) enum i40e_status_code status; struct i40e_aq_get_phy_abilities_resp phy_ab; int ret = -ENOTSUP; + int retries = 0; status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab, NULL); - if (status) - return ret; - + while (status) { + PMD_INIT_LOG(WARNING, "Failed to sync phy type: status=%d", + status); + retries++; + rte_delay_us(100000); + if (retries < 5) + status = i40e_aq_get_phy_capabilities(hw, false, + true, &phy_ab, NULL); + else + return ret; + } return 0; } @@ -9203,8 +9934,12 @@ i40e_configure_registers(struct i40e_hw *hw) reg_table[i].val = I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE; else /* For X710/XL710/XXV710 */ - reg_table[i].val = - I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE; + if (hw->aq.fw_maj_ver < 6) + reg_table[i].val = + I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1; + else + reg_table[i].val = + I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2; } if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) { @@ -9665,9 +10400,8 @@ i40e_start_timecounters(struct rte_eth_dev *dev) uint32_t tsync_inc_h; /* Get current link speed. 
*/ - memset(&link, 0, sizeof(link)); i40e_dev_link_update(dev, 1); - rte_i40e_dev_atomic_read_link_status(dev, &link); + rte_eth_linkstatus_get(dev, &link); switch (link.link_speed) { case ETH_SPEED_NUM_40G: @@ -10147,9 +10881,9 @@ i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map) goto out; } /* update the local VSI info with updated queue map */ - (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping, + rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping, sizeof(vsi->info.tc_mapping)); - (void)rte_memcpy(&vsi->info.queue_mapping, + rte_memcpy(&vsi->info.queue_mapping, &ctxt.info.queue_mapping, sizeof(vsi->info.queue_mapping)); vsi->info.mapping_flags = ctxt.info.mapping_flags; @@ -10276,7 +11010,7 @@ i40e_dcb_hw_configure(struct i40e_pf *pf, * * Returns 0 on success, negative value on failure */ -static int +int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); @@ -10463,27 +11197,21 @@ i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint16_t interval = - i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL); uint16_t msix_intr; msix_intr = intr_handle->intr_vec[queue_id]; if (msix_intr == I40E_MISC_VEC_ID) I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, - I40E_PFINT_DYN_CTLN_INTENA_MASK | - I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | - (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) | - (interval << - I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)); + I40E_PFINT_DYN_CTL0_INTENA_MASK | + I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | + I40E_PFINT_DYN_CTL0_ITR_INDX_MASK); else I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - I40E_RX_VEC_START), I40E_PFINT_DYN_CTLN_INTENA_MASK | I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | - (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) | - (interval << - I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)); + I40E_PFINT_DYN_CTLN_ITR_INDX_MASK); I40E_WRITE_FLUSH(hw); rte_intr_enable(&pci_dev->intr_handle); @@ -10501,12 +11229,13 @@ i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) msix_intr = intr_handle->intr_vec[queue_id]; if (msix_intr == I40E_MISC_VEC_ID) - I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0); + I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, + I40E_PFINT_DYN_CTL0_ITR_INDX_MASK); else I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - I40E_RX_VEC_START), - 0); + I40E_PFINT_DYN_CTLN_ITR_INDX_MASK); I40E_WRITE_FLUSH(hw); return 0; @@ -10598,18 +11327,53 @@ static int i40e_get_eeprom(struct rte_eth_dev *dev, return 0; } -static void i40e_set_default_mac_addr(struct rte_eth_dev *dev, - struct ether_addr *mac_addr) +static int i40e_set_default_mac_addr(struct rte_eth_dev *dev, + struct ether_addr *mac_addr) { struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_vsi *vsi = pf->main_vsi; + struct i40e_mac_filter_info mac_filter; + struct i40e_mac_filter *f; + int ret; if (!is_valid_assigned_ether_addr(mac_addr)) { PMD_DRV_LOG(ERR, "Tried to set invalid MAC address."); - return; + return -EINVAL; + } + + TAILQ_FOREACH(f, &vsi->mac_list, next) { + if (is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr)) + break; + } + + if (f == NULL) { + PMD_DRV_LOG(ERR, "Failed to find filter for default mac"); + return -EIO; } - /* Flags: 0x3 updates port address */ - i40e_aq_mac_address_write(hw, 0x3, 
mac_addr->addr_bytes, NULL); + mac_filter = f->mac_info; + ret = i40e_vsi_delete_mac(vsi, &mac_filter.mac_addr); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to delete mac filter"); + return -EIO; + } + memcpy(&mac_filter.mac_addr, mac_addr, ETH_ADDR_LEN); + ret = i40e_vsi_add_mac(vsi, &mac_filter); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to add mac filter"); + return -EIO; + } + memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN); + + ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL, + mac_addr->addr_bytes, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to change mac"); + return -EIO; + } + + return 0; } static int @@ -10632,9 +11396,11 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) } if (frame_size > ETHER_MAX_LEN) - dev_data->dev_conf.rxmode.jumbo_frame = 1; + dev_data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; else - dev_data->dev_conf.rxmode.jumbo_frame = 0; + dev_data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size; @@ -10708,14 +11474,14 @@ i40e_tunnel_filter_restore(struct i40e_pf *pf) sizeof(f->input.general_fields)); if (((f->input.flags & - I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) == - I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) || + I40E_AQC_ADD_CLOUD_FILTER_0X11) == + I40E_AQC_ADD_CLOUD_FILTER_0X11) || ((f->input.flags & - I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) == - I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) || + I40E_AQC_ADD_CLOUD_FILTER_0X12) == + I40E_AQC_ADD_CLOUD_FILTER_0X12) || ((f->input.flags & - I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) == - I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ)) + I40E_AQC_ADD_CLOUD_FILTER_0X10) == + I40E_AQC_ADD_CLOUD_FILTER_0X10)) big_buffer = 1; if (big_buffer) @@ -10727,19 +11493,29 @@ i40e_tunnel_filter_restore(struct i40e_pf *pf) } } +/* Restore rss filter */ +static inline void +i40e_rss_filter_restore(struct i40e_pf *pf) +{ + struct i40e_rte_flow_rss_conf *conf = + &pf->rss_info; + if (conf->num) + i40e_config_rss_filter(pf, conf, TRUE); +} + static void i40e_filter_restore(struct i40e_pf *pf) { i40e_ethertype_filter_restore(pf); i40e_tunnel_filter_restore(pf); i40e_fdir_filter_restore(pf); + i40e_rss_filter_restore(pf); } static bool is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv) { - if (strcmp(dev->data->drv_name, - drv->driver.name)) + if (strcmp(dev->device->driver->name, drv->driver.name)) return false; return true; @@ -10751,6 +11527,388 @@ is_i40e_supported(struct rte_eth_dev *dev) return is_device_supported(dev, &rte_i40e_pmd); } +struct i40e_customized_pctype* +i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index) +{ + int i; + + for (i = 0; i < I40E_CUSTOMIZED_MAX; i++) { + if (pf->customized_pctype[i].index == index) + return &pf->customized_pctype[i]; + } + return NULL; +} + +static int +i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg, + uint32_t pkg_size, uint32_t proto_num, + struct rte_pmd_i40e_proto_info *proto, + enum rte_pmd_i40e_package_op op) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + uint32_t pctype_num; + struct rte_pmd_i40e_ptype_info *pctype; + uint32_t buff_size; + struct i40e_customized_pctype *new_pctype = NULL; + uint8_t proto_id; + uint8_t pctype_value; + char name[64]; + uint32_t i, j, n; + int ret; + + if (op != RTE_PMD_I40E_PKG_OP_WR_ADD && + op != RTE_PMD_I40E_PKG_OP_WR_DEL) { + PMD_DRV_LOG(ERR, "Unsupported operation."); + return -1; + } + + ret = rte_pmd_i40e_get_ddp_info(pkg, 
pkg_size, + (uint8_t *)&pctype_num, sizeof(pctype_num), + RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to get pctype number"); + return -1; + } + if (!pctype_num) { + PMD_DRV_LOG(INFO, "No new pctype added"); + return -1; + } + + buff_size = pctype_num * sizeof(struct rte_pmd_i40e_proto_info); + pctype = rte_zmalloc("new_pctype", buff_size, 0); + if (!pctype) { + PMD_DRV_LOG(ERR, "Failed to allocate memory"); + return -1; + } + /* get information about new pctype list */ + ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size, + (uint8_t *)pctype, buff_size, + RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to get pctype list"); + rte_free(pctype); + return -1; + } + + /* Update customized pctype. */ + for (i = 0; i < pctype_num; i++) { + pctype_value = pctype[i].ptype_id; + memset(name, 0, sizeof(name)); + for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) { + proto_id = pctype[i].protocols[j]; + if (proto_id == RTE_PMD_I40E_PROTO_UNUSED) + continue; + for (n = 0; n < proto_num; n++) { + if (proto[n].proto_id != proto_id) + continue; + strcat(name, proto[n].name); + strcat(name, "_"); + break; + } + } + name[strlen(name) - 1] = '\0'; + if (!strcmp(name, "GTPC")) + new_pctype = + i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_GTPC); + else if (!strcmp(name, "GTPU_IPV4")) + new_pctype = + i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_GTPU_IPV4); + else if (!strcmp(name, "GTPU_IPV6")) + new_pctype = + i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_GTPU_IPV6); + else if (!strcmp(name, "GTPU")) + new_pctype = + i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_GTPU); + if (new_pctype) { + if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) { + new_pctype->pctype = pctype_value; + new_pctype->valid = true; + } else { + new_pctype->pctype = I40E_FILTER_PCTYPE_INVALID; + new_pctype->valid = false; + } + } + } + + rte_free(pctype); + return 0; +} + +static int +i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg, + uint32_t pkg_size, uint32_t proto_num, + struct rte_pmd_i40e_proto_info *proto, + enum rte_pmd_i40e_package_op op) +{ + struct rte_pmd_i40e_ptype_mapping *ptype_mapping; + uint16_t port_id = dev->data->port_id; + uint32_t ptype_num; + struct rte_pmd_i40e_ptype_info *ptype; + uint32_t buff_size; + uint8_t proto_id; + char name[RTE_PMD_I40E_DDP_NAME_SIZE]; + uint32_t i, j, n; + bool in_tunnel; + int ret; + + if (op != RTE_PMD_I40E_PKG_OP_WR_ADD && + op != RTE_PMD_I40E_PKG_OP_WR_DEL) { + PMD_DRV_LOG(ERR, "Unsupported operation."); + return -1; + } + + if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) { + rte_pmd_i40e_ptype_mapping_reset(port_id); + return 0; + } + + /* get information about new ptype num */ + ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size, + (uint8_t *)&ptype_num, sizeof(ptype_num), + RTE_PMD_I40E_PKG_INFO_PTYPE_NUM); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to get ptype number"); + return ret; + } + if (!ptype_num) { + PMD_DRV_LOG(INFO, "No new ptype added"); + return -1; + } + + buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_info); + ptype = rte_zmalloc("new_ptype", buff_size, 0); + if (!ptype) { + PMD_DRV_LOG(ERR, "Failed to allocate memory"); + return -1; + } + + /* get information about new ptype list */ + ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size, + (uint8_t *)ptype, buff_size, + RTE_PMD_I40E_PKG_INFO_PTYPE_LIST); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to get ptype list"); + rte_free(ptype); + return ret; + } + + buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_mapping); + ptype_mapping = 
rte_zmalloc("ptype_mapping", buff_size, 0); + if (!ptype_mapping) { + PMD_DRV_LOG(ERR, "Failed to allocate memory"); + rte_free(ptype); + return -1; + } + + /* Update ptype mapping table. */ + for (i = 0; i < ptype_num; i++) { + ptype_mapping[i].hw_ptype = ptype[i].ptype_id; + ptype_mapping[i].sw_ptype = 0; + in_tunnel = false; + for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) { + proto_id = ptype[i].protocols[j]; + if (proto_id == RTE_PMD_I40E_PROTO_UNUSED) + continue; + for (n = 0; n < proto_num; n++) { + if (proto[n].proto_id != proto_id) + continue; + memset(name, 0, sizeof(name)); + strcpy(name, proto[n].name); + if (!strncasecmp(name, "PPPOE", 5)) + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_L2_ETHER_PPPOE; + else if (!strncasecmp(name, "IPV4FRAG", 8) && + !in_tunnel) { + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN; + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_L4_FRAG; + } else if (!strncasecmp(name, "IPV4FRAG", 8) && + in_tunnel) { + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN; + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_INNER_L4_FRAG; + } else if (!strncasecmp(name, "OIPV4", 5)) { + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN; + in_tunnel = true; + } else if (!strncasecmp(name, "IPV4", 4) && + !in_tunnel) + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN; + else if (!strncasecmp(name, "IPV4", 4) && + in_tunnel) + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN; + else if (!strncasecmp(name, "IPV6FRAG", 8) && + !in_tunnel) { + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN; + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_L4_FRAG; + } else if (!strncasecmp(name, "IPV6FRAG", 8) && + in_tunnel) { + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN; + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_INNER_L4_FRAG; + } else if (!strncasecmp(name, "OIPV6", 5)) { + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN; + in_tunnel = true; + } else if (!strncasecmp(name, "IPV6", 4) && + !in_tunnel) + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN; + else if (!strncasecmp(name, "IPV6", 4) && + in_tunnel) + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN; + else if (!strncasecmp(name, "UDP", 3) && + !in_tunnel) + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_L4_UDP; + else if (!strncasecmp(name, "UDP", 3) && + in_tunnel) + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_INNER_L4_UDP; + else if (!strncasecmp(name, "TCP", 3) && + !in_tunnel) + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_L4_TCP; + else if (!strncasecmp(name, "TCP", 3) && + in_tunnel) + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_INNER_L4_TCP; + else if (!strncasecmp(name, "SCTP", 4) && + !in_tunnel) + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_L4_SCTP; + else if (!strncasecmp(name, "SCTP", 4) && + in_tunnel) + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_INNER_L4_SCTP; + else if ((!strncasecmp(name, "ICMP", 4) || + !strncasecmp(name, "ICMPV6", 6)) && + !in_tunnel) + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_L4_ICMP; + else if ((!strncasecmp(name, "ICMP", 4) || + !strncasecmp(name, "ICMPV6", 6)) && + in_tunnel) + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_INNER_L4_ICMP; + else if (!strncasecmp(name, "GTPC", 4)) { + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_TUNNEL_GTPC; + in_tunnel = true; + } else if (!strncasecmp(name, "GTPU", 4)) { + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_TUNNEL_GTPU; + in_tunnel = true; + } else if (!strncasecmp(name, "GRENAT", 6)) { + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_TUNNEL_GRENAT; + 
in_tunnel = true; + } else if (!strncasecmp(name, "L2TPV2CTL", 9)) { + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_TUNNEL_L2TP; + in_tunnel = true; + } + + break; + } + } + } + + ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping, + ptype_num, 0); + if (ret) + PMD_DRV_LOG(ERR, "Failed to update mapping table."); + + rte_free(ptype_mapping); + rte_free(ptype); + return ret; +} + +void +i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg, + uint32_t pkg_size, enum rte_pmd_i40e_package_op op) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + uint32_t proto_num; + struct rte_pmd_i40e_proto_info *proto; + uint32_t buff_size; + uint32_t i; + int ret; + + if (op != RTE_PMD_I40E_PKG_OP_WR_ADD && + op != RTE_PMD_I40E_PKG_OP_WR_DEL) { + PMD_DRV_LOG(ERR, "Unsupported operation."); + return; + } + + /* get information about protocol number */ + ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size, + (uint8_t *)&proto_num, sizeof(proto_num), + RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to get protocol number"); + return; + } + if (!proto_num) { + PMD_DRV_LOG(INFO, "No new protocol added"); + return; + } + + buff_size = proto_num * sizeof(struct rte_pmd_i40e_proto_info); + proto = rte_zmalloc("new_proto", buff_size, 0); + if (!proto) { + PMD_DRV_LOG(ERR, "Failed to allocate memory"); + return; + } + + /* get information about protocol list */ + ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size, + (uint8_t *)proto, buff_size, + RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to get protocol list"); + rte_free(proto); + return; + } + + /* Check if GTP is supported. */ + for (i = 0; i < proto_num; i++) { + if (!strncmp(proto[i].name, "GTP", 3)) { + if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) + pf->gtp_support = true; + else + pf->gtp_support = false; + break; + } + } + + /* Update customized pctype info */ + ret = i40e_update_customized_pctype(dev, pkg, pkg_size, + proto_num, proto, op); + if (ret) + PMD_DRV_LOG(INFO, "No pctype is updated."); + + /* Update customized ptype info */ + ret = i40e_update_customized_ptype(dev, pkg, pkg_size, + proto_num, proto, op); + if (ret) + PMD_DRV_LOG(INFO, "No ptype is updated."); + + rte_free(proto); +} + /* Create a QinQ cloud filter * * The Fortville NIC has limited resources for tunnel filters, @@ -10802,6 +11960,11 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf) struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf; struct i40e_hw *hw = I40E_PF_TO_HW(pf); + if (pf->support_multi_driver) { + PMD_DRV_LOG(ERR, "Replace cloud filter is not supported."); + return ret; + } + /* Init */ memset(&filter_replace, 0, sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); @@ -10811,7 +11974,7 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf) /* create L1 filter */ filter_replace.old_filter_type = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN; - filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ; + filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10; filter_replace.tr_bit = 0; /* Prepare the buffer, 2 entries */ @@ -10832,6 +11995,10 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf) &filter_replace_buf); if (ret != I40E_SUCCESS) return ret; + PMD_DRV_LOG(DEBUG, "Global configuration modification: " + "cloud l1 type is changed from 0x%x to 0x%x", + filter_replace.old_filter_type, + filter_replace.new_filter_type); /* Apply the second L2 cloud filter */ memset(&filter_replace, 0, @@ -10842,28 +12009,115 @@ 
i40e_cloud_filter_qinq_create(struct i40e_pf *pf) /* create L2 filter, input for L2 filter will be L1 filter */ filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER; filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP; - filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ; + filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10; /* Prepare the buffer, 2 entries */ filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG; filter_replace_buf.data[0] |= I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; - filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ; + filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10; filter_replace_buf.data[4] |= I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; ret = i40e_aq_replace_cloud_filters(hw, &filter_replace, &filter_replace_buf); + if (!ret) { + i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER); + PMD_DRV_LOG(DEBUG, "Global configuration modification: " + "cloud filter type is changed from 0x%x to 0x%x", + filter_replace.old_filter_type, + filter_replace.new_filter_type); + } return ret; } +int +i40e_config_rss_filter(struct i40e_pf *pf, + struct i40e_rte_flow_rss_conf *conf, bool add) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + uint32_t i, lut = 0; + uint16_t j, num; + struct rte_eth_rss_conf rss_conf = conf->rss_conf; + struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info; + + if (!add) { + if (memcmp(conf, rss_info, + sizeof(struct i40e_rte_flow_rss_conf)) == 0) { + i40e_pf_disable_rss(pf); + memset(rss_info, 0, + sizeof(struct i40e_rte_flow_rss_conf)); + return 0; + } + return -EINVAL; + } + + if (rss_info->num) + return -EINVAL; + + /* If both VMDQ and RSS enabled, not all of PF queues are configured. + * It's necessary to calculate the actual PF queues that are configured. 
+ */ + if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) + num = i40e_pf_calc_configured_queues_num(pf); + else + num = pf->dev_data->nb_rx_queues; + + num = RTE_MIN(num, conf->num); + PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are configured", + num); + + if (num == 0) { + PMD_DRV_LOG(ERR, "No PF queues are configured to enable RSS"); + return -ENOTSUP; + } + + /* Fill in redirection table */ + for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) { + if (j == num) + j = 0; + lut = (lut << 8) | (conf->queue[j] & ((0x1 << + hw->func_caps.rss_table_entry_width) - 1)); + if ((i & 3) == 3) + I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut); + } + + if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) { + i40e_pf_disable_rss(pf); + return 0; + } + if (rss_conf.rss_key == NULL || rss_conf.rss_key_len < + (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) { + /* Random default keys */ + static uint32_t rss_key_default[] = {0x6b793944, + 0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8, + 0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605, + 0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581}; + + rss_conf.rss_key = (uint8_t *)rss_key_default; + rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) * + sizeof(uint32_t); + } + + i40e_hw_rss_hash_set(pf, &rss_conf); + + rte_memcpy(rss_info, + conf, sizeof(struct i40e_rte_flow_rss_conf)); + + return 0; +} + RTE_INIT(i40e_init_log); static void i40e_init_log(void) { - i40e_logtype_init = rte_log_register("pmd.i40e.init"); + i40e_logtype_init = rte_log_register("pmd.net.i40e.init"); if (i40e_logtype_init >= 0) rte_log_set_level(i40e_logtype_init, RTE_LOG_NOTICE); - i40e_logtype_driver = rte_log_register("pmd.i40e.driver"); + i40e_logtype_driver = rte_log_register("pmd.net.i40e.driver"); if (i40e_logtype_driver >= 0) rte_log_set_level(i40e_logtype_driver, RTE_LOG_NOTICE); } + +RTE_PMD_REGISTER_PARAM_STRING(net_i40e, + QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16" + ETH_I40E_SUPPORT_MULTI_DRIVER "=1");