diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 4492bcc129..720f067d6e 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -40,10 +40,12 @@
#include
#include
+#include
#include
#include
#include
#include
+#include
#include
#include
#include
@@ -63,7 +65,6 @@
#include "i40e_rxtx.h"
#include "i40e_pf.h"
#include "i40e_regs.h"
-#include "rte_pmd_i40e.h"
#define ETH_I40E_FLOATING_VEB_ARG "enable_floating_veb"
#define ETH_I40E_FLOATING_VEB_LIST_ARG "floating_veb_list"
@@ -73,7 +74,7 @@
/* Maximum number of capability elements */
#define I40E_MAX_CAP_ELE_NUM 128
-/* Wait count and inteval */
+/* Wait count and interval */
#define I40E_CHK_Q_ENA_COUNT 1000
#define I40E_CHK_Q_ENA_INTERVAL_US 1000
@@ -85,12 +86,6 @@
/* Flow control default timer */
#define I40E_DEFAULT_PAUSE_TIME 0xFFFFU
-/* Flow control default high water */
-#define I40E_DEFAULT_HIGH_WATER (0x1C40/1024)
-
-/* Flow control default low water */
-#define I40E_DEFAULT_LOW_WATER (0x1A40/1024)
-
/* Flow control enable fwd bit */
#define I40E_PRTMAC_FWD_CTRL 0x00000001
@@ -100,6 +95,12 @@
/* Kilobytes shift */
#define I40E_KILOSHIFT 10
+/* Flow control default high water */
+#define I40E_DEFAULT_HIGH_WATER (0xF2000 >> I40E_KILOSHIFT)
+
+/* Flow control default low water */
+#define I40E_DEFAULT_LOW_WATER (0xF2000 >> I40E_KILOSHIFT)
+
/* Receive Average Packet Size in Byte*/
#define I40E_PACKET_AVERAGE_SIZE 128
@@ -249,6 +250,7 @@ static int i40e_dev_configure(struct rte_eth_dev *dev);
static int i40e_dev_start(struct rte_eth_dev *dev);
static void i40e_dev_stop(struct rte_eth_dev *dev);
static void i40e_dev_close(struct rte_eth_dev *dev);
+static int i40e_dev_reset(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
@@ -290,10 +292,10 @@ static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf);
static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
struct rte_eth_pfc_conf *pfc_conf);
-static void i40e_macaddr_add(struct rte_eth_dev *dev,
- struct ether_addr *mac_addr,
- uint32_t index,
- uint32_t pool);
+static int i40e_macaddr_add(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr,
+ uint32_t index,
+ uint32_t pool);
static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
@@ -318,8 +320,7 @@ static void i40e_stat_update_48(struct i40e_hw *hw,
uint64_t *offset,
uint64_t *stat);
static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
-static void i40e_dev_interrupt_handler(struct rte_intr_handle *handle,
- void *param);
+static void i40e_dev_interrupt_handler(void *param);
static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
uint32_t base, uint32_t num);
static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
@@ -333,10 +334,6 @@ static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
struct i40e_vsi *vsi);
static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
static int i40e_vsi_config_double_vlan(struct
i40e_vsi *vsi, int on); -static inline int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi, - struct i40e_macvlan_filter *mv_f, - int num, - struct ether_addr *addr); static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi, struct i40e_macvlan_filter *mv_f, int num, @@ -410,14 +407,19 @@ static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf, struct i40e_ethertype_filter *filter); static int i40e_tunnel_filter_convert( - struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter, + struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter, struct i40e_tunnel_filter *tunnel_filter); static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf, struct i40e_tunnel_filter *tunnel_filter); +static int i40e_cloud_filter_qinq_create(struct i40e_pf *pf); static void i40e_ethertype_filter_restore(struct i40e_pf *pf); static void i40e_tunnel_filter_restore(struct i40e_pf *pf); static void i40e_filter_restore(struct i40e_pf *pf); +static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev); + +int i40e_logtype_init; +int i40e_logtype_driver; static const struct rte_pci_id pci_id_i40e_map[] = { { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) }, @@ -448,6 +450,7 @@ static const struct eth_dev_ops i40e_eth_dev_ops = { .dev_start = i40e_dev_start, .dev_stop = i40e_dev_stop, .dev_close = i40e_dev_close, + .dev_reset = i40e_dev_reset, .promiscuous_enable = i40e_dev_promiscuous_enable, .promiscuous_disable = i40e_dev_promiscuous_disable, .allmulticast_enable = i40e_dev_allmulticast_enable, @@ -479,6 +482,8 @@ static const struct eth_dev_ops i40e_eth_dev_ops = { .rx_queue_release = i40e_dev_rx_queue_release, .rx_queue_count = i40e_dev_rx_queue_count, .rx_descriptor_done = i40e_dev_rx_descriptor_done, + .rx_descriptor_status = i40e_dev_rx_descriptor_status, + .tx_descriptor_status = i40e_dev_tx_descriptor_status, .tx_queue_setup = i40e_dev_tx_queue_setup, .tx_queue_release = i40e_dev_tx_queue_release, .dev_led_on = i40e_dev_led_on, @@ -512,6 +517,7 @@ static const struct eth_dev_ops i40e_eth_dev_ops = { .get_eeprom = i40e_get_eeprom, .mac_addr_set = i40e_set_default_mac_addr, .mtu_set = i40e_dev_mtu_set, + .tm_ops_get = i40e_tm_ops_get, }; /* store statistics names and its offset in stats structure */ @@ -627,16 +633,23 @@ static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = { #define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \ sizeof(rte_i40e_txq_prio_strings[0])) -static struct eth_driver rte_i40e_pmd = { - .pci_drv = { - .id_table = pci_id_i40e_map, - .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, - .probe = rte_eth_dev_pci_probe, - .remove = rte_eth_dev_pci_remove, - }, - .eth_dev_init = eth_i40e_dev_init, - .eth_dev_uninit = eth_i40e_dev_uninit, - .dev_private_size = sizeof(struct i40e_adapter), +static int eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_probe(pci_dev, + sizeof(struct i40e_adapter), eth_i40e_dev_init); +} + +static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_remove(pci_dev, eth_i40e_dev_uninit); +} + +static struct rte_pci_driver rte_i40e_pmd = { + .id_table = pci_id_i40e_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, + .probe = eth_i40e_pci_probe, + .remove = eth_i40e_pci_remove, }; static inline int @@ -667,9 +680,9 @@ rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev, return 0; } -RTE_PMD_REGISTER_PCI(net_i40e, 
rte_i40e_pmd.pci_drv); +RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd); RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map); -RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio"); +RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio-pci"); #ifndef I40E_GLQF_ORT #define I40E_GLQF_ORT(_i) (0x00268900 + ((_i) * 4)) @@ -677,6 +690,9 @@ RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio"); #ifndef I40E_GLQF_PIT #define I40E_GLQF_PIT(_i) (0x00268C80 + ((_i) * 4)) #endif +#ifndef I40E_GLQF_L3_MAP +#define I40E_GLQF_L3_MAP(_i) (0x0026C700 + ((_i) * 4)) +#endif static inline void i40e_GLQF_reg_init(struct i40e_hw *hw) { @@ -866,7 +882,7 @@ is_floating_veb_supported(struct rte_devargs *devargs) static void config_floating_veb(struct rte_eth_dev *dev) { - struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -899,12 +915,14 @@ i40e_init_ethtype_filter_list(struct rte_eth_dev *dev) .entries = I40E_MAX_ETHERTYPE_FILTER_NUM, .key_len = sizeof(struct i40e_ethertype_filter_input), .hash_func = rte_hash_crc, + .hash_func_init_val = 0, + .socket_id = rte_socket_id(), }; /* Initialize ethertype filter rule list and hash */ TAILQ_INIT(ðertype_rule->ethertype_list); snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE, - "ethertype_%s", dev->data->name); + "ethertype_%s", dev->device->name); ethertype_rule->hash_table = rte_hash_create(ðertype_hash_params); if (!ethertype_rule->hash_table) { PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!"); @@ -942,12 +960,14 @@ i40e_init_tunnel_filter_list(struct rte_eth_dev *dev) .entries = I40E_MAX_TUNNEL_FILTER_NUM, .key_len = sizeof(struct i40e_tunnel_filter_input), .hash_func = rte_hash_crc, + .hash_func_init_val = 0, + .socket_id = rte_socket_id(), }; /* Initialize tunnel filter rule list and hash */ TAILQ_INIT(&tunnel_rule->tunnel_list); snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE, - "tunnel_%s", dev->data->name); + "tunnel_%s", dev->device->name); tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params); if (!tunnel_rule->hash_table) { PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!"); @@ -985,12 +1005,14 @@ i40e_init_fdir_filter_list(struct rte_eth_dev *dev) .entries = I40E_MAX_FDIR_FILTER_NUM, .key_len = sizeof(struct rte_eth_fdir_input), .hash_func = rte_hash_crc, + .hash_func_init_val = 0, + .socket_id = rte_socket_id(), }; /* Initialize flow director filter rule list and hash */ TAILQ_INIT(&fdir_info->fdir_list); snprintf(fdir_hash_name, RTE_HASH_NAMESIZE, - "fdir_%s", dev->data->name); + "fdir_%s", dev->device->name); fdir_info->hash_table = rte_hash_create(&fdir_hash_params); if (!fdir_info->hash_table) { PMD_INIT_LOG(ERR, "Failed to create fdir hash table!"); @@ -1041,11 +1063,12 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) i40e_set_tx_function(dev); return 0; } - pci_dev = I40E_DEV_TO_PCI(dev); + i40e_set_default_ptype_table(dev); + pci_dev = RTE_ETH_DEV_TO_PCI(dev); intr_handle = &pci_dev->intr_handle; rte_eth_copy_pci_info(dev, pci_dev); - dev->data->dev_flags = RTE_ETH_DEV_DETACHABLE; + dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE; pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); pf->adapter->eth_dev = dev; @@ -1112,15 +1135,18 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) ((hw->nvm.version >> 4) & 0xff), (hw->nvm.version & 0xf), hw->nvm.eetrack); + /* 
initialise the L3_MAP register */ + ret = i40e_aq_debug_write_register(hw, I40E_GLQF_L3_MAP(40), + 0x00000028, NULL); + if (ret) + PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d", ret); + /* Need the special FW version to support floating VEB */ config_floating_veb(dev); /* Clear PXE mode */ i40e_clear_pxe_mode(hw); - ret = i40e_dev_sync_phy_type(hw); - if (ret) { - PMD_INIT_LOG(ERR, "Failed to sync phy type: %d", ret); - goto err_sync_phy_type; - } + i40e_dev_sync_phy_type(hw); + /* * On X710, performance number is far from the expectation on recent * firmware versions. The fix for this issue may not be integrated in @@ -1235,6 +1261,15 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) ether_addr_copy((struct ether_addr *)hw->mac.perm_addr, &dev->data->mac_addrs[0]); + /* Init dcb to sw mode by default */ + ret = i40e_dcb_init_configure(dev, TRUE); + if (ret != I40E_SUCCESS) { + PMD_INIT_LOG(INFO, "Failed to init dcb."); + pf->flags &= ~I40E_FLAG_DCB; + } + /* Update HW struct after DCB configuration */ + i40e_get_cap(hw); + /* initialize pf host driver to setup SRIOV resource if applicable */ i40e_pf_host_init(dev); @@ -1263,12 +1298,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) /* initialize mirror rule list */ TAILQ_INIT(&pf->mirror_list); - /* Init dcb to sw mode by default */ - ret = i40e_dcb_init_configure(dev, TRUE); - if (ret != I40E_SUCCESS) { - PMD_INIT_LOG(INFO, "Failed to init dcb."); - pf->flags &= ~I40E_FLAG_DCB; - } + /* initialize Traffic Manager configuration */ + i40e_tm_conf_init(dev); ret = i40e_init_ethtype_filter_list(dev); if (ret < 0) @@ -1303,7 +1334,6 @@ err_msix_pool_init: err_qp_pool_init: err_parameter_init: err_get_capabilities: -err_sync_phy_type: (void)i40e_shutdown_adminq(hw); return ret; @@ -1386,7 +1416,7 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev) pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - pci_dev = I40E_DEV_TO_PCI(dev); + pci_dev = RTE_ETH_DEV_TO_PCI(dev); intr_handle = &pci_dev->intr_handle; if (hw->adapter_stopped == 0) @@ -1433,6 +1463,9 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev) rte_free(p_flow); } + /* Remove all Traffic Manager configuration */ + i40e_tm_conf_uninit(dev); + return 0; } @@ -1442,9 +1475,14 @@ i40e_dev_configure(struct rte_eth_dev *dev) struct i40e_adapter *ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode; int i, ret; + ret = i40e_dev_sync_phy_type(hw); + if (ret) + return ret; + /* Initialize to TRUE. If any of Rx queues doesn't meet the * bulk allocation or vector Rx preconditions we will reset it. 
*/
@@ -1519,7 +1557,7 @@ void
i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
{
struct rte_eth_dev *dev = vsi->adapter->eth_dev;
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t msix_vect = vsi->msix_intr;
@@ -1558,7 +1596,8 @@ i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
static void
__vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
- int base_queue, int nb_queue)
+ int base_queue, int nb_queue,
+ uint16_t itr_idx)
{
int i;
uint32_t val;
@@ -1567,7 +1606,7 @@ __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
/* Bind all RX queues to allocated MSIX interrupt */
for (i = 0; i < nb_queue; i++) {
val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
- I40E_QINT_RQCTL_ITR_INDX_MASK |
+ itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT |
((base_queue + i + 1) << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
(0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
@@ -1630,10 +1669,10 @@ __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
}
void
-i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
+i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
{
struct rte_eth_dev *dev = vsi->adapter->eth_dev;
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t msix_vect = vsi->msix_intr;
@@ -1658,7 +1697,8 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
/* VF bind interrupt */
if (vsi->type == I40E_VSI_SRIOV) {
__vsi_queues_bind_intr(vsi, msix_vect,
- vsi->base_queue, vsi->nb_qps);
+ vsi->base_queue, vsi->nb_qps,
+ itr_idx);
return;
}
@@ -1684,7 +1724,8 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
/* not enough msix_vect, map all to one */
__vsi_queues_bind_intr(vsi, msix_vect,
vsi->base_queue + i,
- vsi->nb_used_qps - i);
+ vsi->nb_used_qps - i,
+ itr_idx);
for (; !!record && i < vsi->nb_used_qps; i++)
intr_handle->intr_vec[queue_idx + i] = msix_vect;
@@ -1692,7 +1733,8 @@
}
/* 1:1 queue/msix_vect mapping */
__vsi_queues_bind_intr(vsi, msix_vect,
- vsi->base_queue + i, 1);
+ vsi->base_queue + i, 1,
+ itr_idx);
if (!!record)
intr_handle->intr_vec[queue_idx + i] = msix_vect;
@@ -1705,7 +1747,7 @@ static void
i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
{
struct rte_eth_dev *dev = vsi->adapter->eth_dev;
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t interval = i40e_calc_itr_interval(\
@@ -1737,7 +1779,7 @@ static void
i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
{
struct rte_eth_dev *dev = vsi->adapter->eth_dev;
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t msix_intr, i;
@@ -1778,11 +1820,15 @@ i40e_parse_link_speeds(uint16_t link_speeds)
static int
i40e_phy_conf_link(struct i40e_hw *hw,
uint8_t abilities,
- uint8_t force_speed)
+ uint8_t force_speed,
+ bool is_up)
{
enum i40e_status_code status;
struct i40e_aq_get_phy_abilities_resp phy_ab;
struct i40e_aq_set_phy_config phy_conf;
+ enum i40e_aq_phy_type
cnt; + uint32_t phy_type_mask = 0; + const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX | I40E_AQ_PHY_FLAG_PAUSE_RX | I40E_AQ_PHY_FLAG_PAUSE_RX | @@ -1800,6 +1846,10 @@ i40e_phy_conf_link(struct i40e_hw *hw, if (status) return ret; + /* If link already up, no need to set up again */ + if (is_up && phy_ab.phy_type != 0) + return I40E_SUCCESS; + memset(&phy_conf, 0, sizeof(phy_conf)); /* bits 0-2 use the values from get_phy_abilities_resp */ @@ -1810,13 +1860,21 @@ i40e_phy_conf_link(struct i40e_hw *hw, if (abilities & I40E_AQ_PHY_AN_ENABLED) phy_conf.link_speed = advt; else - phy_conf.link_speed = force_speed; + phy_conf.link_speed = is_up ? force_speed : phy_ab.link_speed; phy_conf.abilities = abilities; + + + /* To enable link, phy_type mask needs to include each type */ + for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_MAX; cnt++) + phy_type_mask |= 1 << cnt; + /* use get_phy_abilities_resp value for the rest */ - phy_conf.phy_type = phy_ab.phy_type; - phy_conf.phy_type_ext = phy_ab.phy_type_ext; + phy_conf.phy_type = is_up ? cpu_to_le32(phy_type_mask) : 0; + phy_conf.phy_type_ext = is_up ? (I40E_AQ_PHY_TYPE_EXT_25G_KR | + I40E_AQ_PHY_TYPE_EXT_25G_CR | I40E_AQ_PHY_TYPE_EXT_25G_SR | + I40E_AQ_PHY_TYPE_EXT_25G_LR) : 0; phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info; phy_conf.eee_capability = phy_ab.eee_capability; phy_conf.eeer = phy_ab.eeer_val; @@ -1848,13 +1906,7 @@ i40e_apply_link_speed(struct rte_eth_dev *dev) abilities |= I40E_AQ_PHY_AN_ENABLED; abilities |= I40E_AQ_PHY_LINK_ENABLED; - /* Skip changing speed on 40G interfaces, FW does not support */ - if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) { - speed = I40E_LINK_SPEED_UNKNOWN; - abilities |= I40E_AQ_PHY_AN_ENABLED; - } - - return i40e_phy_conf_link(hw, abilities, speed); + return i40e_phy_conf_link(hw, abilities, speed, true); } static int @@ -1864,9 +1916,10 @@ i40e_dev_start(struct rte_eth_dev *dev) struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct i40e_vsi *main_vsi = pf->main_vsi; int ret, i; - struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; uint32_t intr_vector = 0; + struct i40e_vsi *vsi; hw->adapter_stopped = 0; @@ -1910,19 +1963,21 @@ i40e_dev_start(struct rte_eth_dev *dev) /* Map queues with MSIX interrupt */ main_vsi->nb_used_qps = dev->data->nb_rx_queues - pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM; - i40e_vsi_queues_bind_intr(main_vsi); + i40e_vsi_queues_bind_intr(main_vsi, I40E_ITR_INDEX_DEFAULT); i40e_vsi_enable_queues_intr(main_vsi); /* Map VMDQ VSI queues with MSIX interrupt */ for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) { pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM; - i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi); + i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi, + I40E_ITR_INDEX_DEFAULT); i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi); } /* enable FDIR MSIX interrupt */ if (pf->fdir.fdir_vsi) { - i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi); + i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi, + I40E_ITR_INDEX_NONE); i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi); } @@ -1945,6 +2000,15 @@ i40e_dev_start(struct rte_eth_dev *dev) PMD_DRV_LOG(INFO, "fail to set vsi broadcast"); } + /* Enable the VLAN promiscuous mode. 
*/
+ if (pf->vfs) {
+ for (i = 0; i < pf->vf_num; i++) {
+ vsi = pf->vfs[i].vsi;
+ i40e_aq_set_vsi_vlan_promisc(hw, vsi->seid,
+ true, NULL);
+ }
+ }
+
/* Apply link configure */
if (dev->data->dev_conf.link_speeds & ~(ETH_LINK_SPEED_100M |
ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
@@ -1970,7 +2034,7 @@ i40e_dev_start(struct rte_eth_dev *dev)
if (dev->data->dev_conf.intr_conf.lsc != 0)
PMD_INIT_LOG(INFO, "lsc won't enable because of no intr multiplex");
- } else if (dev->data->dev_conf.intr_conf.lsc != 0) {
+ } else {
ret = i40e_aq_set_phy_int_mask(hw,
~(I40E_AQ_EVENT_LINK_UPDOWN |
I40E_AQ_EVENT_MODULE_QUAL_FAIL |
@@ -1978,7 +2042,7 @@ i40e_dev_start(struct rte_eth_dev *dev)
if (ret != I40E_SUCCESS)
PMD_DRV_LOG(WARNING, "Fail to set phy mask");
- /* Call get_link_info aq commond to enable LSE */
+ /* Call get_link_info aq command to enable/disable LSE */
i40e_dev_link_update(dev, 0);
}
@@ -1987,6 +2051,11 @@ i40e_dev_start(struct rte_eth_dev *dev)
i40e_filter_restore(pf);
+ if (pf->tm_conf.root && !pf->tm_conf.committed)
+ PMD_DRV_LOG(WARNING,
+ "please call hierarchy_commit() "
+ "before starting the port");
+
return I40E_SUCCESS;
err_up:
@@ -2000,12 +2069,15 @@ static void
i40e_dev_stop(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vsi *main_vsi = pf->main_vsi;
struct i40e_mirror_rule *p_mirror;
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
int i;
+ if (hw->adapter_stopped == 1)
+ return;
/* Disable all queues */
i40e_dev_switch_queues(pf, FALSE);
@@ -2047,6 +2119,11 @@ i40e_dev_stop(struct rte_eth_dev *dev)
rte_free(intr_handle->intr_vec);
intr_handle->intr_vec = NULL;
}
+
+ /* reset hierarchy commit */
+ pf->tm_conf.committed = false;
+
+ hw->adapter_stopped = 1;
}
static void
@@ -2054,7 +2131,7 @@ i40e_dev_close(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t reg;
int i;
@@ -2062,7 +2139,6 @@ i40e_dev_close(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
i40e_dev_stop(dev);
- hw->adapter_stopped = 1;
i40e_dev_free_queues(dev);
/* Disable interrupt */
@@ -2097,6 +2173,32 @@ i40e_dev_close(struct rte_eth_dev *dev)
I40E_WRITE_FLUSH(hw);
}
+/*
+ * Reset PF device only to re-initialize resources in PMD layer
+ */
+static int
+i40e_dev_reset(struct rte_eth_dev *dev)
+{
+ int ret;
+
+ /* When a DPDK PMD PF begins to reset the PF port, it should notify all
+ * its VFs to make them align with it. The detailed notification
+ * mechanism is PMD specific. As to i40e PF, it is rather complex.
+ * To avoid unexpected behavior in VFs, reset of a PF with SR-IOV
+ * activation is currently not supported. It might be supported later.
+ */
+ if (dev->data->sriov.active)
+ return -ENOTSUP;
+
+ ret = eth_i40e_dev_uninit(dev);
+ if (ret)
+ return ret;
+
+ ret = eth_i40e_dev_init(dev);
+
+ return ret;
+}
+
static void
i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
@@ -2187,7 +2289,7 @@ i40e_dev_set_link_down(struct rte_eth_dev *dev)
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
- return i40e_phy_conf_link(hw, abilities, speed);
+ return i40e_phy_conf_link(hw, abilities, speed, false);
}
int
@@ -2220,11 +2322,11 @@ i40e_dev_link_update(struct rte_eth_dev *dev,
}
link.link_status = link_status.link_info & I40E_AQ_LINK_UP;
- if (!wait_to_complete)
+ if (!wait_to_complete || link.link_status)
break;
rte_delay_ms(CHECK_INTERVAL);
- } while (!link.link_status && rep_cnt--);
+ } while (--rep_cnt);
if (!link.link_status)
goto out;
@@ -2265,6 +2367,8 @@ out:
if (link.link_status == old.link_status)
return -1;
+ i40e_notify_all_vfs_link_status(dev);
+
return 0;
}
@@ -2289,6 +2393,10 @@ i40e_update_vsi_stats(struct i40e_vsi *vsi)
i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
vsi->offset_loaded, &oes->rx_broadcast,
&nes->rx_broadcast);
+ /* exclude CRC bytes */
+ nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
+ nes->rx_broadcast) * ETHER_CRC_LEN;
+
i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
&oes->rx_discards, &nes->rx_discards);
/* GLV_REPC not supported */
@@ -2339,6 +2447,40 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
+ /* Get rx/tx bytes of internal transfer packets */
+ i40e_stat_update_48(hw, I40E_GLV_GORCH(hw->port),
+ I40E_GLV_GORCL(hw->port),
+ pf->offset_loaded,
+ &pf->internal_stats_offset.rx_bytes,
+ &pf->internal_stats.rx_bytes);
+
+ i40e_stat_update_48(hw, I40E_GLV_GOTCH(hw->port),
+ I40E_GLV_GOTCL(hw->port),
+ pf->offset_loaded,
+ &pf->internal_stats_offset.tx_bytes,
+ &pf->internal_stats.tx_bytes);
+ /* Get total internal rx packet count */
+ i40e_stat_update_48(hw, I40E_GLV_UPRCH(hw->port),
+ I40E_GLV_UPRCL(hw->port),
+ pf->offset_loaded,
+ &pf->internal_stats_offset.rx_unicast,
+ &pf->internal_stats.rx_unicast);
+ i40e_stat_update_48(hw, I40E_GLV_MPRCH(hw->port),
+ I40E_GLV_MPRCL(hw->port),
+ pf->offset_loaded,
+ &pf->internal_stats_offset.rx_multicast,
+ &pf->internal_stats.rx_multicast);
+ i40e_stat_update_48(hw, I40E_GLV_BPRCH(hw->port),
+ I40E_GLV_BPRCL(hw->port),
+ pf->offset_loaded,
+ &pf->internal_stats_offset.rx_broadcast,
+ &pf->internal_stats.rx_broadcast);
+
+ /* exclude CRC size */
+ pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast +
+ pf->internal_stats.rx_multicast +
+ pf->internal_stats.rx_broadcast) * ETHER_CRC_LEN;
+
/* Get statistics of struct i40e_eth_stats */
i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
I40E_GLPRT_GORCL(hw->port),
@@ -2362,6 +2504,16 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
ns->eth.rx_broadcast) * ETHER_CRC_LEN;
+ /* Workaround: it is possible I40E_GLV_GORCH[H/L] is updated before
+ * I40E_GLPRT_GORCH[H/L], so there is a small window that causes a negative
+ * value.
+ */
+ if (ns->eth.rx_bytes < pf->internal_stats.rx_bytes)
+ ns->eth.rx_bytes = 0;
+ /* exclude internal rx bytes */
+ else
+ ns->eth.rx_bytes -= pf->internal_stats.rx_bytes;
+
i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
pf->offset_loaded, &os->eth.rx_discards,
&ns->eth.rx_discards);
@@ -2389,6 +2541,13 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
&ns->eth.tx_broadcast);
ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
ns->eth.tx_broadcast) * ETHER_CRC_LEN;
+
+ /* exclude internal tx bytes */
+ if (ns->eth.tx_bytes < pf->internal_stats.tx_bytes)
+ ns->eth.tx_bytes = 0;
+ else
+ ns->eth.tx_bytes -= pf->internal_stats.tx_bytes;
+
/* GLPRT_TEPC not supported */
/* additional port specific stats */
@@ -2539,13 +2698,14 @@ i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
/* call read registers - updates values, now write them to struct */
i40e_read_stats_registers(pf, hw);
- stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
- pf->main_vsi->eth_stats.rx_multicast +
- pf->main_vsi->eth_stats.rx_broadcast -
+ stats->ipackets = ns->eth.rx_unicast +
+ ns->eth.rx_multicast +
+ ns->eth.rx_broadcast -
+ ns->eth.rx_discards -
pf->main_vsi->eth_stats.rx_discards;
- stats->opackets = pf->main_vsi->eth_stats.tx_unicast +
- pf->main_vsi->eth_stats.tx_multicast +
- pf->main_vsi->eth_stats.tx_broadcast;
+ stats->opackets = ns->eth.tx_unicast +
+ ns->eth.tx_multicast +
+ ns->eth.tx_broadcast;
stats->ibytes = ns->eth.rx_bytes;
stats->obytes = ns->eth.tx_bytes;
stats->oerrors = ns->eth.tx_errors +
@@ -2809,7 +2969,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vsi *vsi = pf->main_vsi;
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->pci_dev = pci_dev;
dev_info->max_rx_queues = vsi->nb_qps;
@@ -2913,71 +3073,93 @@ i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
}
static int
-i40e_vlan_tpid_set(struct rte_eth_dev *dev,
- enum rte_vlan_type vlan_type,
- uint16_t tpid)
+i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
+ enum rte_vlan_type vlan_type,
+ uint16_t tpid, int qinq)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint64_t reg_r = 0, reg_w = 0;
- uint16_t reg_id = 0;
- int ret = 0;
- int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+ uint64_t reg_r = 0;
+ uint64_t reg_w = 0;
+ uint16_t reg_id = 3;
+ int ret;
- switch (vlan_type) {
- case ETH_VLAN_TYPE_OUTER:
- if (qinq)
+ if (qinq) {
+ if (vlan_type == ETH_VLAN_TYPE_OUTER)
reg_id = 2;
- else
- reg_id = 3;
- break;
- case ETH_VLAN_TYPE_INNER:
- if (qinq)
- reg_id = 3;
- else {
- ret = -EINVAL;
- PMD_DRV_LOG(ERR,
- "Unsupported vlan type in single vlan.");
- return ret;
- }
- break;
- default:
- ret = -EINVAL;
- PMD_DRV_LOG(ERR, "Unsupported vlan type %d", vlan_type);
- return ret;
}
+
ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
&reg_r, NULL);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR,
"Fail to debug read from I40E_GL_SWT_L2TAGCTRL[%d]",
reg_id);
- ret = -EIO;
- return ret;
+ return -EIO;
}
PMD_DRV_LOG(DEBUG,
- "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64,
- reg_id, reg_r);
+ "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64,
+ reg_id, reg_r);
reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK));
reg_w |=
((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
if (reg_r == reg_w) {
- ret = 0;
PMD_DRV_LOG(DEBUG, "No need to write");
- return ret;
+ return 0;
}
ret = i40e_aq_debug_write_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
reg_w, NULL);
if (ret != I40E_SUCCESS) {
- ret = -EIO;
PMD_DRV_LOG(ERR,
- "Fail to debug write to I40E_GL_SWT_L2TAGCTRL[%d]",
- reg_id);
- return ret;
+ "Fail to debug write to I40E_GL_SWT_L2TAGCTRL[%d]",
+ reg_id);
+ return -EIO;
}
PMD_DRV_LOG(DEBUG,
- "Debug write 0x%08"PRIx64" to I40E_GL_SWT_L2TAGCTRL[%d]",
- reg_w, reg_id);
+ "Debug write 0x%08"PRIx64" to I40E_GL_SWT_L2TAGCTRL[%d]",
+ reg_w, reg_id);
+
+ return 0;
+}
+
+static int
+i40e_vlan_tpid_set(struct rte_eth_dev *dev,
+ enum rte_vlan_type vlan_type,
+ uint16_t tpid)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+ int ret = 0;
+
+ if ((vlan_type != ETH_VLAN_TYPE_INNER &&
+ vlan_type != ETH_VLAN_TYPE_OUTER) ||
+ (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) {
+ PMD_DRV_LOG(ERR,
+ "Unsupported vlan type.");
+ return -EINVAL;
+ }
+ /* 802.1ad frames ability is added in NVM API 1.7 */
+ if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
+ if (qinq) {
+ if (vlan_type == ETH_VLAN_TYPE_OUTER)
+ hw->first_tag = rte_cpu_to_le_16(tpid);
+ else if (vlan_type == ETH_VLAN_TYPE_INNER)
+ hw->second_tag = rte_cpu_to_le_16(tpid);
+ } else {
+ if (vlan_type == ETH_VLAN_TYPE_OUTER)
+ hw->second_tag = rte_cpu_to_le_16(tpid);
+ }
+ ret = i40e_aq_set_switch_config(hw, 0, 0, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR,
+ "Set switch config failed aq_err: %d",
+ hw->aq.asq_last_status);
+ ret = -EIO;
+ }
+ } else
+ /* If NVM API < 1.7, keep the register setting */
+ ret = i40e_vlan_tpid_set_by_registers(dev, vlan_type,
+ tpid, qinq);
return ret;
}
@@ -3006,7 +3188,7 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
if (mask & ETH_VLAN_EXTEND_MASK) {
if (dev->data->dev_conf.rxmode.hw_vlan_extend) {
i40e_vsi_config_double_vlan(vsi, TRUE);
- /* Set global registers with default ether type value */
+ /* Set global registers with default ethertype. */
i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
ETHER_TYPE_VLAN);
i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
@@ -3078,6 +3260,13 @@ i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
fc_conf->pause_time = pf->fc_conf.pause_time;
+
+ /* read out from register, in case they are modified by another port */
+ pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] =
+ I40E_READ_REG(hw, I40E_GLRPB_GHW) >> I40E_KILOSHIFT;
+ pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] =
+ I40E_READ_REG(hw, I40E_GLRPB_GLW) >> I40E_KILOSHIFT;
+
fc_conf->high_water = pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS];
fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS];
@@ -3227,7 +3416,7 @@ i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
}
/* Add a MAC address, and update filters */
-static void
+static int
i40e_macaddr_add(struct rte_eth_dev *dev,
struct ether_addr *mac_addr,
__rte_unused uint32_t index,
@@ -3244,16 +3433,16 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
pool);
- return;
+ return -ENOTSUP;
}
if (pool > pf->nb_cfg_vmdq_vsi) {
PMD_DRV_LOG(ERR, "Pool number %u invalid.
Max pool is %u", pool, pf->nb_cfg_vmdq_vsi); - return; + return -EINVAL; } - (void)rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN); + rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN); if (dev->data->dev_conf.rxmode.hw_vlan_filter) mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; else @@ -3267,8 +3456,9 @@ i40e_macaddr_add(struct rte_eth_dev *dev, ret = i40e_vsi_add_mac(vsi, &mac_filter); if (ret != I40E_SUCCESS) { PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter"); - return; + return -ENODEV; } + return 0; } /* Remove a MAC address, and update filters */ @@ -3357,10 +3547,10 @@ i40e_vf_mac_filter_set(struct i40e_pf *pf, } if (add) { - (void)rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN); - (void)rte_memcpy(hw->mac.addr, new_mac->addr_bytes, + rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN); + rte_memcpy(hw->mac.addr, new_mac->addr_bytes, ETHER_ADDR_LEN); - (void)rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr, + rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr, ETHER_ADDR_LEN); mac_filter.filter_type = filter->filter_type; @@ -3371,7 +3561,7 @@ i40e_vf_mac_filter_set(struct i40e_pf *pf, } ether_addr_copy(new_mac, &pf->dev_addr); } else { - (void)rte_memcpy(hw->mac.addr, hw->mac.perm_addr, + rte_memcpy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN); ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr); if (ret != I40E_SUCCESS) { @@ -3727,7 +3917,7 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_hw *hw = I40E_PF_TO_HW(pf); - struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); uint16_t qp_count = 0, vsi_count = 0; if (pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) { @@ -4163,7 +4353,7 @@ i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi, vsi->info.valid_sections = rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID); memset(&ctxt, 0, sizeof(ctxt)); - (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); + rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); ctxt.seid = vsi->seid; hw = I40E_VSI_TO_HW(vsi); @@ -4202,7 +4392,7 @@ i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap) return ret; } - (void)rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles, + rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles, sizeof(vsi->info.qs_handle)); return I40E_SUCCESS; } @@ -4223,6 +4413,8 @@ i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi, for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) if (enabled_tcmap & (1 << i)) total_tc++; + if (total_tc == 0) + total_tc = 1; vsi->enabled_tc = enabled_tcmap; /* Number of queues per enabled TC */ @@ -4339,6 +4531,7 @@ i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi) hw->aq.asq_last_status); goto fail; } + veb->enabled_tc = I40E_DEFAULT_TCMAP; /* get statistics index */ ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL, @@ -4456,7 +4649,7 @@ i40e_update_default_filter_setting(struct i40e_vsi *vsi) if (vsi->type != I40E_VSI_MAIN) return I40E_ERR_CONFIG; memset(&def_filter, 0, sizeof(def_filter)); - (void)rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr, + rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr, ETH_ADDR_LEN); def_filter.vlan_tag = 0; def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH | @@ -4466,8 +4659,8 @@ i40e_update_default_filter_setting(struct i40e_vsi *vsi) struct i40e_mac_filter *f; struct ether_addr *mac; - PMD_DRV_LOG(WARNING, - "Cannot remove the default macvlan filter"); + PMD_DRV_LOG(DEBUG, + "Cannot remove the default 
macvlan filter"); /* It needs to add the permanent mac into mac list */ f = rte_zmalloc("macv_filter", sizeof(*f), 0); if (f == NULL) { @@ -4475,7 +4668,7 @@ i40e_update_default_filter_setting(struct i40e_vsi *vsi) return I40E_ERR_NO_MEMORY; } mac = &f->mac_info.mac_addr; - (void)rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr, + rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr, ETH_ADDR_LEN); f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH; TAILQ_INSERT_TAIL(&vsi->mac_list, f, next); @@ -4483,7 +4676,7 @@ i40e_update_default_filter_setting(struct i40e_vsi *vsi) return ret; } - (void)rte_memcpy(&filter.mac_addr, + rte_memcpy(&filter.mac_addr, (struct ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN); filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; return i40e_vsi_add_mac(vsi, &filter); @@ -4659,6 +4852,7 @@ i40e_vsi_setup(struct i40e_pf *pf, vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi; vsi->user_param = user_param; vsi->vlan_anti_spoof_on = 0; + vsi->vlan_filter_on = 0; /* Allocate queues */ switch (vsi->type) { case I40E_VSI_MAIN : @@ -4743,7 +4937,7 @@ i40e_vsi_setup(struct i40e_pf *pf, PMD_DRV_LOG(ERR, "Failed to get VSI params"); goto fail_msix_alloc; } - (void)rte_memcpy(&vsi->info, &ctxt.info, + rte_memcpy(&vsi->info, &ctxt.info, sizeof(struct i40e_aqc_vsi_properties_data)); vsi->vsi_id = ctxt.vsi_number; vsi->info.valid_sections = 0; @@ -4761,7 +4955,7 @@ i40e_vsi_setup(struct i40e_pf *pf, rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID); vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH; - (void)rte_memcpy(&ctxt.info, &vsi->info, + rte_memcpy(&ctxt.info, &vsi->info, sizeof(struct i40e_aqc_vsi_properties_data)); ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info, I40E_DEFAULT_TCMAP); @@ -4782,15 +4976,15 @@ i40e_vsi_setup(struct i40e_pf *pf, goto fail_msix_alloc; } - (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping, + rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping, sizeof(vsi->info.tc_mapping)); - (void)rte_memcpy(&vsi->info.queue_mapping, + rte_memcpy(&vsi->info.queue_mapping, &ctxt.info.queue_mapping, sizeof(vsi->info.queue_mapping)); vsi->info.mapping_flags = ctxt.info.mapping_flags; vsi->info.valid_sections = 0; - (void)rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr, + rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr, ETH_ADDR_LEN); /** @@ -4835,13 +5029,14 @@ i40e_vsi_setup(struct i40e_pf *pf, rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID); ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL; ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info, - I40E_DEFAULT_TCMAP); + hw->func_caps.enabled_tcmap); if (ret != I40E_SUCCESS) { PMD_DRV_LOG(ERR, "Failed to configure TC queue mapping"); goto fail_msix_alloc; } - ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP; + + ctxt.info.up_enable_bits = hw->func_caps.enabled_tcmap; ctxt.info.valid_sections |= rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID); /** @@ -4932,7 +5127,7 @@ i40e_vsi_setup(struct i40e_pf *pf, } /* MAC/VLAN configuration */ - (void)rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN); + rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN); filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; ret = i40e_vsi_add_mac(vsi, &filter); @@ -5044,7 +5239,7 @@ i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on) vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK); vsi->info.port_vlan_flags |= vlan_flags; ctxt.seid = vsi->seid; - (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); + rte_memcpy(&ctxt.info, &vsi->info, 
sizeof(vsi->info)); ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (ret) PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping", @@ -5152,6 +5347,8 @@ i40e_pf_setup(struct i40e_pf *pf) pf->offset_loaded = FALSE; memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats)); memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats)); + memset(&pf->internal_stats, 0, sizeof(struct i40e_eth_stats)); + memset(&pf->internal_stats_offset, 0, sizeof(struct i40e_eth_stats)); ret = i40e_pf_get_switch_config(pf); if (ret != I40E_SUCCESS) { @@ -5666,16 +5863,16 @@ i40e_dev_handle_vfr_event(struct rte_eth_dev *dev) index = abs_vf_id / I40E_UINT32_BIT_SIZE; offset = abs_vf_id % I40E_UINT32_BIT_SIZE; val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index)); - /* VFR event occured */ + /* VFR event occurred */ if (val & (0x1 << offset)) { int ret; /* Clear the event first */ I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index), (0x1 << offset)); - PMD_DRV_LOG(INFO, "VF %u reset occured", abs_vf_id); + PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id); /** - * Only notify a VF reset event occured, + * Only notify a VF reset event occurred, * don't trigger another SW reset */ ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0); @@ -5689,18 +5886,10 @@ static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); - struct i40e_virtchnl_pf_event event; int i; - event.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; - event.event_data.link_event.link_status = - dev->data->dev_link.link_status; - event.event_data.link_event.link_speed = - (enum i40e_aq_link_speed)dev->data->dev_link.link_speed; - for (i = 0; i < pf->vf_num; i++) - i40e_pf_host_send_msg_to_vf(&pf->vfs[i], I40E_VIRTCHNL_OP_EVENT, - I40E_SUCCESS, (uint8_t *)&event, sizeof(event)); + i40e_notify_vf_link_status(dev, &pf->vfs[i]); } static void @@ -5742,14 +5931,12 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev) break; case i40e_aqc_opc_get_link_status: ret = i40e_dev_link_update(dev, 0); - if (!ret) { - i40e_notify_all_vfs_link_status(dev); + if (!ret) _rte_eth_dev_callback_process(dev, - RTE_ETH_EVENT_INTR_LSC, NULL); - } + RTE_ETH_EVENT_INTR_LSC, NULL, NULL); break; default: - PMD_DRV_LOG(ERR, "Request %u is not supported yet", + PMD_DRV_LOG(DEBUG, "Request %u is not supported yet", opcode); break; } @@ -5770,8 +5957,7 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev) * void */ static void -i40e_dev_interrupt_handler(struct rte_intr_handle *intr_handle, - void *param) +i40e_dev_interrupt_handler(void *param) { struct rte_eth_dev *dev = (struct rte_eth_dev *)param; struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -5788,7 +5974,6 @@ i40e_dev_interrupt_handler(struct rte_intr_handle *intr_handle, PMD_DRV_LOG(INFO, "No interrupt event"); goto done; } -#ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK) PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error"); if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) @@ -5803,7 +5988,6 @@ i40e_dev_interrupt_handler(struct rte_intr_handle *intr_handle, PMD_DRV_LOG(ERR, "ICR0: HMC error"); if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK) PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error"); -#endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */ if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) { PMD_DRV_LOG(INFO, "ICR0: VF reset detected"); @@ -5817,10 +6001,10 @@ i40e_dev_interrupt_handler(struct rte_intr_handle *intr_handle, done: /* Enable interrupt */ i40e_pf_enable_irq0(hw); - rte_intr_enable(intr_handle); + 
rte_intr_enable(dev->intr_handle); } -static int +int i40e_add_macvlan_filters(struct i40e_vsi *vsi, struct i40e_macvlan_filter *filter, int total) @@ -5849,7 +6033,7 @@ i40e_add_macvlan_filters(struct i40e_vsi *vsi, memset(req_list, 0, ele_buff_size); for (i = 0; i < actual_num; i++) { - (void)rte_memcpy(req_list[i].mac_addr, + rte_memcpy(req_list[i].mac_addr, &filter[num + i].macaddr, ETH_ADDR_LEN); req_list[i].vlan_tag = rte_cpu_to_le_16(filter[num + i].vlan_id); @@ -5894,7 +6078,7 @@ DONE: return ret; } -static int +int i40e_remove_macvlan_filters(struct i40e_vsi *vsi, struct i40e_macvlan_filter *filter, int total) @@ -5924,7 +6108,7 @@ i40e_remove_macvlan_filters(struct i40e_vsi *vsi, memset(req_list, 0, ele_buff_size); for (i = 0; i < actual_num; i++) { - (void)rte_memcpy(req_list[i].mac_addr, + rte_memcpy(req_list[i].mac_addr, &filter[num + i].macaddr, ETH_ADDR_LEN); req_list[i].vlan_tag = rte_cpu_to_le_16(filter[num + i].vlan_id); @@ -6014,7 +6198,7 @@ i40e_store_vlan_filter(struct i40e_vsi *vsi, vsi->vfta[vid_idx] &= ~vid_bit; } -static void +void i40e_set_vlan_filter(struct i40e_vsi *vsi, uint16_t vlan_id, bool on) { @@ -6027,7 +6211,7 @@ i40e_set_vlan_filter(struct i40e_vsi *vsi, i40e_store_vlan_filter(vsi, vlan_id, on); - if (!vsi->vlan_anti_spoof_on || !vlan_id) + if ((!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on) || !vlan_id) return; vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id); @@ -6050,7 +6234,7 @@ i40e_set_vlan_filter(struct i40e_vsi *vsi, * Find all vlan options for specific mac addr, * return with actual vlan found. */ -static inline int +int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi, struct i40e_macvlan_filter *mv_f, int num, struct ether_addr *addr) @@ -6075,7 +6259,7 @@ i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi, "vlan number doesn't match"); return I40E_ERR_PARAM; } - (void)rte_memcpy(&mv_f[i].macaddr, + rte_memcpy(&mv_f[i].macaddr, addr, ETH_ADDR_LEN); mv_f[i].vlan_id = j * I40E_UINT32_BIT_SIZE + k; @@ -6104,7 +6288,7 @@ i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi, PMD_DRV_LOG(ERR, "buffer number not match"); return I40E_ERR_PARAM; } - (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr, + rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr, ETH_ADDR_LEN); mv_f[i].vlan_id = vlan; mv_f[i].filter_type = f->mac_info.filter_type; @@ -6140,7 +6324,7 @@ i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi) i = 0; if (vsi->vlan_num == 0) { TAILQ_FOREACH(f, &vsi->mac_list, next) { - (void)rte_memcpy(&mv_f[i].macaddr, + rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr, ETH_ADDR_LEN); mv_f[i].filter_type = f->mac_info.filter_type; mv_f[i].vlan_id = 0; @@ -6310,7 +6494,7 @@ i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter) for (i = 0; i < vlan_num; i++) { mv_f[i].filter_type = mac_filter->filter_type; - (void)rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr, + rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr, ETH_ADDR_LEN); } @@ -6333,7 +6517,7 @@ i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter) ret = I40E_ERR_NO_MEMORY; goto DONE; } - (void)rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr, + rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr, ETH_ADDR_LEN); f->mac_info.filter_type = mac_filter->filter_type; TAILQ_INSERT_TAIL(&vsi->mac_list, f, next); @@ -6380,7 +6564,7 @@ i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr) for (i = 0; i < vlan_num; i++) { mv_f[i].filter_type = filter_type; - (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr, + 
rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr, ETH_ADDR_LEN); } if (filter_type == RTE_MACVLAN_PERFECT_MATCH || @@ -6696,18 +6880,27 @@ i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag) /* Convert tunnel filter structure */ static int -i40e_tunnel_filter_convert(struct i40e_aqc_add_remove_cloud_filters_element_data - *cld_filter, - struct i40e_tunnel_filter *tunnel_filter) +i40e_tunnel_filter_convert( + struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter, + struct i40e_tunnel_filter *tunnel_filter) { - ether_addr_copy((struct ether_addr *)&cld_filter->outer_mac, + ether_addr_copy((struct ether_addr *)&cld_filter->element.outer_mac, (struct ether_addr *)&tunnel_filter->input.outer_mac); - ether_addr_copy((struct ether_addr *)&cld_filter->inner_mac, + ether_addr_copy((struct ether_addr *)&cld_filter->element.inner_mac, (struct ether_addr *)&tunnel_filter->input.inner_mac); - tunnel_filter->input.inner_vlan = cld_filter->inner_vlan; - tunnel_filter->input.flags = cld_filter->flags; - tunnel_filter->input.tenant_id = cld_filter->tenant_id; - tunnel_filter->queue = cld_filter->queue_number; + tunnel_filter->input.inner_vlan = cld_filter->element.inner_vlan; + if ((rte_le_to_cpu_16(cld_filter->element.flags) & + I40E_AQC_ADD_CLOUD_FLAGS_IPV6) == + I40E_AQC_ADD_CLOUD_FLAGS_IPV6) + tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV6; + else + tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV4; + tunnel_filter->input.flags = cld_filter->element.flags; + tunnel_filter->input.tenant_id = cld_filter->element.tenant_id; + tunnel_filter->queue = cld_filter->element.queue_number; + rte_memcpy(tunnel_filter->input.general_fields, + cld_filter->general_fields, + sizeof(cld_filter->general_fields)); return 0; } @@ -6786,40 +6979,44 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf, int val, ret = 0; struct i40e_hw *hw = I40E_PF_TO_HW(pf); struct i40e_vsi *vsi = pf->main_vsi; - struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter; - struct i40e_aqc_add_remove_cloud_filters_element_data *pfilter; + struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter; + struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter; struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel; struct i40e_tunnel_filter *tunnel, *node; struct i40e_tunnel_filter check_filter; /* Check if filter exists */ cld_filter = rte_zmalloc("tunnel_filter", - sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data), - 0); + sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext), + 0); if (NULL == cld_filter) { PMD_DRV_LOG(ERR, "Failed to alloc memory."); - return -EINVAL; + return -ENOMEM; } pfilter = cld_filter; - ether_addr_copy(&tunnel_filter->outer_mac, (struct ether_addr*)&pfilter->outer_mac); - ether_addr_copy(&tunnel_filter->inner_mac, (struct ether_addr*)&pfilter->inner_mac); + ether_addr_copy(&tunnel_filter->outer_mac, + (struct ether_addr *)&pfilter->element.outer_mac); + ether_addr_copy(&tunnel_filter->inner_mac, + (struct ether_addr *)&pfilter->element.inner_mac); - pfilter->inner_vlan = rte_cpu_to_le_16(tunnel_filter->inner_vlan); + pfilter->element.inner_vlan = + rte_cpu_to_le_16(tunnel_filter->inner_vlan); if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) { ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4; ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr); - rte_memcpy(&pfilter->ipaddr.v4.data, + rte_memcpy(&pfilter->element.ipaddr.v4.data, &rte_cpu_to_le_32(ipv4_addr), - sizeof(pfilter->ipaddr.v4.data)); + sizeof(pfilter->element.ipaddr.v4.data)); } else { ip_type = 
I40E_AQC_ADD_CLOUD_FLAGS_IPV6; for (i = 0; i < 4; i++) { convert_ipv6[i] = rte_cpu_to_le_32(rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv6_addr[i])); } - rte_memcpy(&pfilter->ipaddr.v6.data, &convert_ipv6, - sizeof(pfilter->ipaddr.v6.data)); + rte_memcpy(&pfilter->element.ipaddr.v6.data, + &convert_ipv6, + sizeof(pfilter->element.ipaddr.v6.data)); } /* check tunneled type */ @@ -6841,17 +7038,18 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf, } val = i40e_dev_get_filter_type(tunnel_filter->filter_type, - &pfilter->flags); + &pfilter->element.flags); if (val < 0) { rte_free(cld_filter); return -EINVAL; } - pfilter->flags |= rte_cpu_to_le_16( + pfilter->element.flags |= rte_cpu_to_le_16( I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE | ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT)); - pfilter->tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id); - pfilter->queue_number = rte_cpu_to_le_16(tunnel_filter->queue_id); + pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id); + pfilter->element.queue_number = + rte_cpu_to_le_16(tunnel_filter->queue_id); /* Check if there is the filter in SW list */ memset(&check_filter, 0, sizeof(check_filter)); @@ -6868,20 +7066,21 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf, } if (add) { - ret = i40e_aq_add_cloud_filters(hw, vsi->seid, cld_filter, 1); + ret = i40e_aq_add_cloud_filters(hw, + vsi->seid, &cld_filter->element, 1); if (ret < 0) { PMD_DRV_LOG(ERR, "Failed to add a tunnel filter."); - return ret; + return -ENOTSUP; } tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0); rte_memcpy(tunnel, &check_filter, sizeof(check_filter)); ret = i40e_sw_tunnel_filter_insert(pf, tunnel); } else { ret = i40e_aq_remove_cloud_filters(hw, vsi->seid, - cld_filter, 1); + &cld_filter->element, 1); if (ret < 0) { PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter."); - return ret; + return -ENOTSUP; } ret = i40e_sw_tunnel_filter_del(pf, &node->input); } @@ -6890,64 +7089,381 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf, return ret; } -static int -i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port) -{ - uint8_t i; +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0 0x48 +#define I40E_TR_VXLAN_GRE_KEY_MASK 0x4 +#define I40E_TR_GENEVE_KEY_MASK 0x8 +#define I40E_TR_GENERIC_UDP_TUNNEL_MASK 0x40 +#define I40E_TR_GRE_KEY_MASK 0x400 +#define I40E_TR_GRE_KEY_WITH_XSUM_MASK 0x800 +#define I40E_TR_GRE_NO_KEY_MASK 0x8000 - for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { - if (pf->vxlan_ports[i] == port) - return i; - } +static enum +i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf) +{ + struct i40e_aqc_replace_cloud_filters_cmd filter_replace; + struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf; + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + enum i40e_status_code status = I40E_SUCCESS; + + memset(&filter_replace, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); + memset(&filter_replace_buf, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf)); + + /* create L1 filter */ + filter_replace.old_filter_type = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC; + filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_TEID_MPLS; + filter_replace.tr_bit = 0; + + /* Prepare the buffer, 3 entries */ + filter_replace_buf.data[0] = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0; + filter_replace_buf.data[0] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[2] = 0xFF; + filter_replace_buf.data[3] = 0xFF; + filter_replace_buf.data[4] = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1; + 
filter_replace_buf.data[4] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[7] = 0xF0; + filter_replace_buf.data[8] + = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0; + filter_replace_buf.data[8] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[10] = I40E_TR_VXLAN_GRE_KEY_MASK | + I40E_TR_GENEVE_KEY_MASK | + I40E_TR_GENERIC_UDP_TUNNEL_MASK; + filter_replace_buf.data[11] = (I40E_TR_GRE_KEY_MASK | + I40E_TR_GRE_KEY_WITH_XSUM_MASK | + I40E_TR_GRE_NO_KEY_MASK) >> 8; + + status = i40e_aq_replace_cloud_filters(hw, &filter_replace, + &filter_replace_buf); + return status; +} - return -1; +static enum +i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf) +{ + struct i40e_aqc_replace_cloud_filters_cmd filter_replace; + struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf; + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + enum i40e_status_code status = I40E_SUCCESS; + + /* For MPLSoUDP */ + memset(&filter_replace, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); + memset(&filter_replace_buf, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf)); + filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER | + I40E_AQC_MIRROR_CLOUD_FILTER; + filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP; + filter_replace.new_filter_type = + I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP; + /* Prepare the buffer, 2 entries */ + filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG; + filter_replace_buf.data[0] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_TEID_MPLS; + filter_replace_buf.data[4] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + status = i40e_aq_replace_cloud_filters(hw, &filter_replace, + &filter_replace_buf); + if (status < 0) + return status; + + /* For MPLSoGRE */ + memset(&filter_replace, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); + memset(&filter_replace_buf, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf)); + + filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER | + I40E_AQC_MIRROR_CLOUD_FILTER; + filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC; + filter_replace.new_filter_type = + I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE; + /* Prepare the buffer, 2 entries */ + filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG; + filter_replace_buf.data[0] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_TEID_MPLS; + filter_replace_buf.data[4] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + + status = i40e_aq_replace_cloud_filters(hw, &filter_replace, + &filter_replace_buf); + return status; } -static int -i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port) +int +i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf, + struct i40e_tunnel_filter_conf *tunnel_filter, + uint8_t add) { - int idx, ret; - uint8_t filter_idx; + uint16_t ip_type; + uint32_t ipv4_addr; + uint8_t i, tun_type = 0; + /* internal variable to convert ipv6 byte order */ + uint32_t convert_ipv6[4]; + int val, ret = 0; + struct i40e_pf_vf *vf = NULL; struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct i40e_vsi *vsi; + struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter; + struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter; + struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel; + struct i40e_tunnel_filter *tunnel, *node; + struct i40e_tunnel_filter check_filter; /* Check if filter exists */ + uint32_t teid_le; + bool big_buffer = 0; - idx = 
-static int
-i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
+int
+i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
+		      struct i40e_tunnel_filter_conf *tunnel_filter,
+		      uint8_t add)
 {
-	int idx, ret;
-	uint8_t filter_idx;
+	uint16_t ip_type;
+	uint32_t ipv4_addr;
+	uint8_t i, tun_type = 0;
+	/* internal variable to convert ipv6 byte order */
+	uint32_t convert_ipv6[4];
+	int val, ret = 0;
+	struct i40e_pf_vf *vf = NULL;
 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	struct i40e_vsi *vsi;
+	struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter;
+	struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter;
+	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+	struct i40e_tunnel_filter *tunnel, *node;
+	struct i40e_tunnel_filter check_filter; /* Check if filter exists */
+	uint32_t teid_le;
+	bool big_buffer = 0;
 
-	idx = i40e_get_vxlan_port_idx(pf, port);
+	cld_filter = rte_zmalloc("tunnel_filter",
+			sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
+			0);
 
-	/* Check if port already exists */
-	if (idx >= 0) {
-		PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
-		return -EINVAL;
+	if (cld_filter == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
 	}
+	pfilter = cld_filter;
 
-	/* Now check if there is space to add the new port */
-	idx = i40e_get_vxlan_port_idx(pf, 0);
-	if (idx < 0) {
-		PMD_DRV_LOG(ERR,
-			"Maximum number of UDP ports reached, not adding port %d",
-			port);
-		return -ENOSPC;
-	}
+	ether_addr_copy(&tunnel_filter->outer_mac,
+			(struct ether_addr *)&pfilter->element.outer_mac);
+	ether_addr_copy(&tunnel_filter->inner_mac,
+			(struct ether_addr *)&pfilter->element.inner_mac);
 
-	ret = i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
-				     &filter_idx, NULL);
-	if (ret < 0) {
-		PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
-		return -1;
+	pfilter->element.inner_vlan =
+		rte_cpu_to_le_16(tunnel_filter->inner_vlan);
+	if (tunnel_filter->ip_type == I40E_TUNNEL_IPTYPE_IPV4) {
+		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
+		ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
+		rte_memcpy(&pfilter->element.ipaddr.v4.data,
+			   &rte_cpu_to_le_32(ipv4_addr),
+			   sizeof(pfilter->element.ipaddr.v4.data));
+	} else {
+		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
+		for (i = 0; i < 4; i++) {
+			convert_ipv6[i] =
+			rte_cpu_to_le_32(rte_be_to_cpu_32(
+					tunnel_filter->ip_addr.ipv6_addr[i]));
+		}
+		rte_memcpy(&pfilter->element.ipaddr.v6.data,
+			   &convert_ipv6,
+			   sizeof(pfilter->element.ipaddr.v6.data));
 	}
 
-	PMD_DRV_LOG(INFO, "Added port %d with AQ command with index %d",
-		    port, filter_idx);
-
-	/* New port: add it and mark its index in the bitmap */
-	pf->vxlan_ports[idx] = port;
-	pf->vxlan_bitmap |= (1 << idx);
-
-	if (!(pf->flags & I40E_FLAG_VXLAN))
-		pf->flags |= I40E_FLAG_VXLAN;
-
-	return 0;
-}
-
-static int
+	/* check tunneled type */
+	switch (tunnel_filter->tunnel_type) {
+	case I40E_TUNNEL_TYPE_VXLAN:
+		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
+		break;
+	case I40E_TUNNEL_TYPE_NVGRE:
+		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
+		break;
+	case I40E_TUNNEL_TYPE_IP_IN_GRE:
+		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
+		break;
+	case I40E_TUNNEL_TYPE_MPLSoUDP:
+		if (!pf->mpls_replace_flag) {
+			i40e_replace_mpls_l1_filter(pf);
+			i40e_replace_mpls_cloud_filter(pf);
+			pf->mpls_replace_flag = 1;
+		}
+		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
+			teid_le >> 4;
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
+			(teid_le & 0xF) << 12;
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
+			0x40;
+		big_buffer = 1;
+		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoUDP;
+		break;
+	case I40E_TUNNEL_TYPE_MPLSoGRE:
+		if (!pf->mpls_replace_flag) {
+			i40e_replace_mpls_l1_filter(pf);
+			i40e_replace_mpls_cloud_filter(pf);
+			pf->mpls_replace_flag = 1;
+		}
+		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
+			teid_le >> 4;
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
+			(teid_le & 0xF) << 12;
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
+			0x0;
+		big_buffer = 1;
+		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoGRE;
+		break;
+	case I40E_TUNNEL_TYPE_QINQ:
+		if (!pf->qinq_replace_flag) {
+			ret = i40e_cloud_filter_qinq_create(pf);
+			if (ret < 0)
+				PMD_DRV_LOG(DEBUG,
"QinQ tunnel filter already created."); + pf->qinq_replace_flag = 1; + } + /* Add in the General fields the values of + * the Outer and Inner VLAN + * Big Buffer should be set, see changes in + * i40e_aq_add_cloud_filters + */ + pfilter->general_fields[0] = tunnel_filter->inner_vlan; + pfilter->general_fields[1] = tunnel_filter->outer_vlan; + big_buffer = 1; + break; + default: + /* Other tunnel types is not supported. */ + PMD_DRV_LOG(ERR, "tunnel type is not supported."); + rte_free(cld_filter); + return -EINVAL; + } + + if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP) + pfilter->element.flags = + I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP; + else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE) + pfilter->element.flags = + I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE; + else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ) + pfilter->element.flags |= + I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ; + else { + val = i40e_dev_get_filter_type(tunnel_filter->filter_type, + &pfilter->element.flags); + if (val < 0) { + rte_free(cld_filter); + return -EINVAL; + } + } + + pfilter->element.flags |= rte_cpu_to_le_16( + I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE | + ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT)); + pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id); + pfilter->element.queue_number = + rte_cpu_to_le_16(tunnel_filter->queue_id); + + if (!tunnel_filter->is_to_vf) + vsi = pf->main_vsi; + else { + if (tunnel_filter->vf_id >= pf->vf_num) { + PMD_DRV_LOG(ERR, "Invalid argument."); + return -EINVAL; + } + vf = &pf->vfs[tunnel_filter->vf_id]; + vsi = vf->vsi; + } + + /* Check if there is the filter in SW list */ + memset(&check_filter, 0, sizeof(check_filter)); + i40e_tunnel_filter_convert(cld_filter, &check_filter); + check_filter.is_to_vf = tunnel_filter->is_to_vf; + check_filter.vf_id = tunnel_filter->vf_id; + node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input); + if (add && node) { + PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!"); + return -EINVAL; + } + + if (!add && !node) { + PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!"); + return -EINVAL; + } + + if (add) { + if (big_buffer) + ret = i40e_aq_add_cloud_filters_big_buffer(hw, + vsi->seid, cld_filter, 1); + else + ret = i40e_aq_add_cloud_filters(hw, + vsi->seid, &cld_filter->element, 1); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to add a tunnel filter."); + return -ENOTSUP; + } + tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0); + rte_memcpy(tunnel, &check_filter, sizeof(check_filter)); + ret = i40e_sw_tunnel_filter_insert(pf, tunnel); + } else { + if (big_buffer) + ret = i40e_aq_remove_cloud_filters_big_buffer( + hw, vsi->seid, cld_filter, 1); + else + ret = i40e_aq_remove_cloud_filters(hw, vsi->seid, + &cld_filter->element, 1); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter."); + return -ENOTSUP; + } + ret = i40e_sw_tunnel_filter_del(pf, &node->input); + } + + rte_free(cld_filter); + return ret; +} + +static int +i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port) +{ + uint8_t i; + + for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { + if (pf->vxlan_ports[i] == port) + return i; + } + + return -1; +} + +static int +i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port) +{ + int idx, ret; + uint8_t filter_idx; + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + + idx = i40e_get_vxlan_port_idx(pf, port); + + /* Check if port already exists */ + if (idx >= 0) { + PMD_DRV_LOG(ERR, "Port %d already offloaded", port); + 
+		return -EINVAL;
+	}
+
+	/* Now check if there is space to add the new port */
+	idx = i40e_get_vxlan_port_idx(pf, 0);
+	if (idx < 0) {
+		PMD_DRV_LOG(ERR,
+			"Maximum number of UDP ports reached, not adding port %d",
+			port);
+		return -ENOSPC;
+	}
+
+	ret = i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
+				     &filter_idx, NULL);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
+		return -1;
+	}
+
+	PMD_DRV_LOG(INFO, "Added port %d with AQ command with index %d",
+		    port, filter_idx);
+
+	/* New port: add it and mark its index in the bitmap */
+	pf->vxlan_ports[idx] = port;
+	pf->vxlan_bitmap |= (1 << idx);
+
+	if (!(pf->flags & I40E_FLAG_VXLAN))
+		pf->flags |= I40E_FLAG_VXLAN;
+
+	return 0;
+}
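
The add path above keeps a fixed-size table of offloaded UDP ports plus a
bitmap of occupied slots, so a port value of zero can double as "empty". A
toy model of that bookkeeping (not driver code; the size macro stands in
for I40E_MAX_PF_UDP_OFFLOAD_PORTS):

	#include <stdint.h>

	#define MAX_UDP_PORTS 16

	struct port_table {
		uint16_t ports[MAX_UDP_PORTS];
		uint16_t bitmap;
	};

	static int
	port_add(struct port_table *t, uint16_t port)
	{
		int i, free_idx = -1;

		for (i = 0; i < MAX_UDP_PORTS; i++) {
			if (t->ports[i] == port)
				return -1;		/* already offloaded */
			if (free_idx < 0 && t->ports[i] == 0)
				free_idx = i;		/* first empty slot */
		}
		if (free_idx < 0)
			return -2;			/* table is full */
		t->ports[free_idx] = port;
		t->bitmap |= 1 << free_idx;
		return free_idx;
	}

The driver performs the same search in two passes, first
i40e_get_vxlan_port_idx(pf, port) for duplicates and then (pf, 0) for a
free slot; the single-scan variant above is only a compactness tweak.
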
+
+static int
 i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
 {
 	int idx;
@@ -7073,7 +7589,7 @@ i40e_pf_config_rss(struct i40e_pf *pf)
 
 	/*
 	 * If both VMDQ and RSS enabled, not all of PF queues are configured.
-	 * It's necessary to calulate the actual PF queues that are configured.
+	 * It's necessary to calculate the actual PF queues that are configured.
 	 */
 	if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
 		num = i40e_pf_calc_configured_queues_num(pf);
@@ -7419,7 +7935,44 @@ i40e_set_hash_filter_global_config(struct i40e_hw *hw,
 		pctype = i40e_flowtype_to_pctype(i);
 		reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ?
 				I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
-		i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype), reg);
+		if (hw->mac.type == I40E_MAC_X722) {
+			if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP) {
+				i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+					I40E_FILTER_PCTYPE_NONF_IPV4_UDP), reg);
+				i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+					I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP),
+					reg);
+				i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+					I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP),
+					reg);
+			} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) {
+				i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+					I40E_FILTER_PCTYPE_NONF_IPV4_TCP), reg);
+				i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+					I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK),
+					reg);
+			} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP) {
+				i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+					I40E_FILTER_PCTYPE_NONF_IPV6_UDP), reg);
+				i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+					I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP),
+					reg);
+				i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+					I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP),
+					reg);
+			} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) {
+				i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+					I40E_FILTER_PCTYPE_NONF_IPV6_TCP), reg);
+				i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+					I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK),
+					reg);
+			} else {
+				i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype),
+						  reg);
+			}
+		} else {
+			i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype), reg);
+		}
 	}
 
 	reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
@@ -7712,7 +8265,7 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
 /**
  * Validate if the input set is allowed for a specific PCTYPE
  */
-static int
+int
 i40e_validate_input_set(enum i40e_filter_pctype pctype,
 	enum rte_filter_type filter, uint64_t inset)
 {
@@ -7887,7 +8440,7 @@ i40e_parse_input_set(uint64_t *inset,
  * Translate the input set from bit masks to register aware bit masks
  * and vice versa
  */
-static uint64_t
+uint64_t
 i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
 {
 	uint64_t val = 0;
@@ -7972,7 +8525,7 @@ i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
 	return val;
 }
 
-static int
+int
 i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem)
 {
 	uint8_t i, idx = 0;
@@ -8020,7 +8573,7 @@ i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem)
 	return idx;
 }
 
-static void
+void
 i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
 {
 	uint32_t reg = i40e_read_rx_ctl(hw, addr);
@@ -8588,11 +9141,11 @@ i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
 static void
 i40e_enable_extended_tag(struct rte_eth_dev *dev)
 {
-	struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	uint32_t buf = 0;
 	int ret;
 
-	ret = rte_eal_pci_read_config(pci_dev, &buf, sizeof(buf),
+	ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
 				   PCI_DEV_CAP_REG);
 	if (ret < 0) {
 		PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
@@ -8605,7 +9158,7 @@ i40e_enable_extended_tag(struct rte_eth_dev *dev)
 	}
 
 	buf = 0;
-	ret = rte_eal_pci_read_config(pci_dev, &buf, sizeof(buf),
+	ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
 				   PCI_DEV_CTRL_REG);
 	if (ret < 0) {
 		PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
@@ -8617,7 +9170,7 @@ i40e_enable_extended_tag(struct rte_eth_dev *dev)
 		return;
 	}
 	buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
-	ret = rte_eal_pci_write_config(pci_dev, &buf, sizeof(buf),
+	ret = rte_pci_write_config(pci_dev, &buf, sizeof(buf),
 				    PCI_DEV_CTRL_REG);
 	if (ret < 0) {
 		PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x",
@@ -8726,12 +9279,17 @@ i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
  */
 
 /* For both X710 and XL710 */
-#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x10000200
-#define I40E_GL_SWR_PRI_JOIN_MAP_0 0x26CE00
+#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1	0x10000200
+#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2	0x20000200
+#define I40E_GL_SWR_PRI_JOIN_MAP_0		0x26CE00
 
 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
 #define I40E_GL_SWR_PRI_JOIN_MAP_2 0x26CE08
 
+/* For X722 */
+#define I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x20000200
+#define I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x013F0200
+
 /* For X710 */
 #define I40E_GL_SWR_PM_UP_THR_EF_VALUE   0x03030303
 /* For XL710 */
@@ -8744,17 +9302,25 @@ i40e_dev_sync_phy_type(struct i40e_hw *hw)
 	enum i40e_status_code status;
 	struct i40e_aq_get_phy_abilities_resp phy_ab;
 	int ret = -ENOTSUP;
+	int retries = 0;
 
 	status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
					      NULL);
-	if (status)
-		return ret;
-
+	while (status) {
+		PMD_INIT_LOG(WARNING, "Failed to sync phy type: status=%d",
+			     status);
+		retries++;
+		rte_delay_us(100000);
+		if (retries < 5)
+			status = i40e_aq_get_phy_capabilities(hw, false,
+					true, &phy_ab, NULL);
+		else
+			return ret;
+	}
 	return 0;
 }
 
-
 static void
 i40e_configure_registers(struct i40e_hw *hw)
 {
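
The sync-phy-type hunk above replaces a fail-fast error return with up to
five attempts spaced 100 ms apart. The same policy, lifted into a generic
helper for illustration (try_once() is hypothetical, not part of the
driver):

	#include <errno.h>
	#include <rte_cycles.h>	/* rte_delay_us() */

	/* Bounded-retry sketch mirroring the 5 x 100 ms policy above. */
	static int
	retry_bounded(int (*try_once)(void *arg), void *arg)
	{
		int status = try_once(arg);
		int retries = 0;

		while (status) {
			if (++retries >= 5)
				return -ENOTSUP;	/* give up */
			rte_delay_us(100000);		/* wait 100 ms */
			status = try_once(arg);
		}
		return 0;
	}
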
@@ -8762,8 +9328,8 @@ i40e_configure_registers(struct i40e_hw *hw)
 		uint32_t addr;
 		uint64_t val;
 	} reg_table[] = {
-		{I40E_GL_SWR_PRI_JOIN_MAP_0, I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE},
-		{I40E_GL_SWR_PRI_JOIN_MAP_2, I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE},
+		{I40E_GL_SWR_PRI_JOIN_MAP_0, 0},
+		{I40E_GL_SWR_PRI_JOIN_MAP_2, 0},
 		{I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
 	};
 	uint64_t reg;
@@ -8771,6 +9337,28 @@ i40e_configure_registers(struct i40e_hw *hw)
 	int ret;
 
 	for (i = 0; i < RTE_DIM(reg_table); i++) {
+		if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_0) {
+			if (hw->mac.type == I40E_MAC_X722) /* For X722 */
+				reg_table[i].val =
+					I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE;
+			else /* For X710/XL710/XXV710 */
+				if (hw->aq.fw_maj_ver < 6)
+					reg_table[i].val =
+						I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1;
+				else
+					reg_table[i].val =
+						I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2;
+		}
+
+		if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) {
+			if (hw->mac.type == I40E_MAC_X722) /* For X722 */
+				reg_table[i].val =
+					I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE;
+			else /* For X710/XL710/XXV710 */
+				reg_table[i].val =
+					I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE;
+		}
+
 		if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
 			if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types) || /* For XL710 */
 			    I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) /* For XXV710 */
@@ -9702,9 +10290,9 @@ i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
 		goto out;
 	}
 	/* update the local VSI info with updated queue map */
-	(void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
+	rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
 					sizeof(vsi->info.tc_mapping));
-	(void)rte_memcpy(&vsi->info.queue_mapping,
+	rte_memcpy(&vsi->info.queue_mapping,
 			&ctxt.info.queue_mapping,
 			sizeof(vsi->info.queue_mapping));
 	vsi->info.mapping_flags = ctxt.info.mapping_flags;
@@ -9836,7 +10424,7 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
 {
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	int ret = 0;
+	int i, ret = 0;
 
 	if ((pf->flags & I40E_FLAG_DCB) == 0) {
 		PMD_INIT_LOG(ERR, "HW doesn't support DCB");
@@ -9863,6 +10451,9 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
 		hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
 		hw->local_dcbx_config.etscfg.tsatable[0] =
						I40E_IEEE_TSA_ETS;
+		/* all UPs mapping to TC0 */
+		for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
+			hw->local_dcbx_config.etscfg.prioritytable[i] = 0;
 		hw->local_dcbx_config.etsrec =
 			hw->local_dcbx_config.etscfg;
 		hw->local_dcbx_config.pfc.willing = 0;
@@ -10012,7 +10603,7 @@ i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
 static int
 i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
-	struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint16_t interval =
@@ -10046,7 +10637,7 @@ i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 static int
 i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
-	struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint16_t msix_intr;
@@ -10169,8 +10760,7 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 {
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	struct rte_eth_dev_data *dev_data = pf->dev_data;
-	uint32_t frame_size = mtu + ETHER_HDR_LEN
-			      + ETHER_CRC_LEN + I40E_VLAN_TAG_SIZE;
+	uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;
 	int ret = 0;
 
 	/* check if mtu is within the allowed range */
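
The MTU hunk above folds the per-frame budget into one macro. Assuming
I40E_ETH_OVERHEAD in i40e_ethdev.h accounts for the L2 header, the CRC and
a double VLAN tag, the arithmetic works out as follows (a sketch, not the
verbatim macro):

	/* 14 (ETHER_HDR_LEN) + 4 (ETHER_CRC_LEN) + 2 * 4 (VLAN tags) = 26,
	 * so an MTU of 1500 yields a 1526-byte frame and QinQ traffic no
	 * longer overflows the old single-VLAN budget. */
	uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;	/* 1500 -> 1526 */
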
@@ -10232,17 +10822,51 @@ static void
 i40e_tunnel_filter_restore(struct i40e_pf *pf)
 {
 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
-	struct i40e_vsi *vsi = pf->main_vsi;
+	struct i40e_vsi *vsi;
+	struct i40e_pf_vf *vf;
 	struct i40e_tunnel_filter_list
 		*tunnel_list = &pf->tunnel.tunnel_list;
 	struct i40e_tunnel_filter *f;
-	struct i40e_aqc_add_remove_cloud_filters_element_data cld_filter;
+	struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
+	bool big_buffer = 0;
 
 	TAILQ_FOREACH(f, tunnel_list, rules) {
+		if (!f->is_to_vf)
+			vsi = pf->main_vsi;
+		else {
+			vf = &pf->vfs[f->vf_id];
+			vsi = vf->vsi;
+		}
 		memset(&cld_filter, 0, sizeof(cld_filter));
-		rte_memcpy(&cld_filter, &f->input, sizeof(f->input));
-		cld_filter.queue_number = f->queue;
-		i40e_aq_add_cloud_filters(hw, vsi->seid, &cld_filter, 1);
+		ether_addr_copy((struct ether_addr *)&f->input.outer_mac,
+			(struct ether_addr *)&cld_filter.element.outer_mac);
+		ether_addr_copy((struct ether_addr *)&f->input.inner_mac,
+			(struct ether_addr *)&cld_filter.element.inner_mac);
+		cld_filter.element.inner_vlan = f->input.inner_vlan;
+		cld_filter.element.flags = f->input.flags;
+		cld_filter.element.tenant_id = f->input.tenant_id;
+		cld_filter.element.queue_number = f->queue;
+		rte_memcpy(cld_filter.general_fields,
+			   f->input.general_fields,
+			   sizeof(f->input.general_fields));
+
+		if (((f->input.flags &
+		     I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ==
+		     I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ||
+		    ((f->input.flags &
+		     I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ==
+		     I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ||
+		    ((f->input.flags &
+		     I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) ==
+		     I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ))
+			big_buffer = 1;
+
+		if (big_buffer)
+			i40e_aq_add_cloud_filters_big_buffer(hw,
+					vsi->seid, &cld_filter, 1);
+		else
+			i40e_aq_add_cloud_filters(hw, vsi->seid,
+						  &cld_filter.element, 1);
 	}
 }
 
@@ -10254,936 +10878,134 @@ i40e_filter_restore(struct i40e_pf *pf)
 	i40e_fdir_filter_restore(pf);
 }
 
-static int
-is_i40e_pmd(const char *driver_name)
+static bool
+is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
 {
-	if (!strstr(driver_name, "i40e"))
-		return -ENOTSUP;
-
-	if (strstr(driver_name, "i40e_vf"))
-		return -ENOTSUP;
+	if (strcmp(dev->device->driver->name, drv->driver.name))
+		return false;
 
-	return 0;
+	return true;
 }
 
-int
-rte_pmd_i40e_ping_vfs(uint8_t port, uint16_t vf)
+bool
+is_i40e_supported(struct rte_eth_dev *dev)
 {
-	struct rte_eth_dev *dev;
-	struct i40e_pf *pf;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-
-	if (is_i40e_pmd(dev->data->drv_name))
-		return -ENOTSUP;
-
-	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-
-	if (vf >= pf->vf_num || !pf->vfs) {
-		PMD_DRV_LOG(ERR, "Invalid argument.");
-		return -EINVAL;
-	}
-
-	i40e_notify_vf_link_status(dev, &pf->vfs[vf]);
-
-	return 0;
+	return is_device_supported(dev, &rte_i40e_pmd);
 }
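
is_i40e_supported() replaces the old substring match on the driver name
with an exact driver-pointer comparison. A hypothetical guard showing the
intended use, in the style of the rte_pmd_i40e_* entry points being removed
below (the function name here is illustrative):

	static int
	example_vf_op(uint16_t port)
	{
		struct rte_eth_dev *dev;

		RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

		dev = &rte_eth_devices[port];
		if (!is_i40e_supported(dev))
			return -ENOTSUP;

		/* now safe to treat dev->data->dev_private as i40e data */
		return 0;
	}
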
-int
-rte_pmd_i40e_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf_id, uint8_t on)
+/* Create a QinQ cloud filter
+ *
+ * The Fortville NIC has limited resources for tunnel filters,
+ * so we can only reuse existing filters.
+ *
+ * In step 1 we define which Field Vector fields can be used for
+ * filter types.
+ * As we do not have the inner tag defined as a field,
+ * we have to define it first, by reusing one of the L1 entries.
+ *
+ * In step 2 we are replacing one of the existing filter types with
+ * a new one for QinQ.
+ * As we are reusing L1 and replacing L2, some of the default filter
+ * types will disappear, depending on which L1 and L2 entries we reuse.
+ *
+ * Step 1: Create L1 filter of outer vlan (12b) + inner vlan (12b)
+ *
+ * 1.	Create L1 filter of outer vlan (12b) which will be in use
+ *	later when we define the cloud filter.
+ *	a.	Valid_flags.replace_cloud = 0
+ *	b.	Old_filter = 10 (Stag_Inner_Vlan)
+ *	c.	New_filter = 0x10
+ *	d.	TR bit = 0xff (optional, not used here)
+ *	e.	Buffer – 2 entries:
+ *		i.	Byte 0 = 8 (outer vlan FV index).
+ *			Byte 1 = 0 (rsv)
+ *			Byte 2-3 = 0x0fff
+ *		ii.	Byte 0 = 37 (inner vlan FV index).
+ *			Byte 1 = 0 (rsv)
+ *			Byte 2-3 = 0x0fff
+ *
+ * Step 2:
+ * 2.	Create cloud filter using two L1 filters entries: stag and
+ *	new filter (outer vlan + inner vlan)
+ *	a.	Valid_flags.replace_cloud = 1
+ *	b.	Old_filter = 1 (instead of outer IP)
+ *	c.	New_filter = 0x10
+ *	d.	Buffer – 2 entries:
+ *		i.	Byte 0 = 0x80 | 7 (valid | Stag).
+ *			Byte 1-3 = 0 (rsv)
+ *		ii.	Byte 8 = 0x80 | 0x10 (valid | new l1 filter step1)
+ *			Byte 9-11 = 0 (rsv)
+ */
+static int
+i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
 {
-	struct rte_eth_dev *dev;
-	struct i40e_pf *pf;
-	struct i40e_vsi *vsi;
-	struct i40e_hw *hw;
-	struct i40e_vsi_context ctxt;
-	int ret;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-
-	if (is_i40e_pmd(dev->data->drv_name))
-		return -ENOTSUP;
-
-	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-
-	if (vf_id >= pf->vf_num || !pf->vfs) {
-		PMD_DRV_LOG(ERR, "Invalid argument.");
-		return -EINVAL;
-	}
-
-	vsi = pf->vfs[vf_id].vsi;
-	if (!vsi) {
-		PMD_DRV_LOG(ERR, "Invalid VSI.");
-		return -EINVAL;
-	}
-
-	/* Check if it has been already on or off */
-	if (vsi->info.valid_sections &
-		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SECURITY_VALID)) {
-		if (on) {
-			if ((vsi->info.sec_flags &
-			     I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) ==
-			    I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK)
-				return 0; /* already on */
-		} else {
-			if ((vsi->info.sec_flags &
-			     I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) == 0)
-				return 0; /* already off */
-		}
-	}
-
-	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
-	if (on)
-		vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
-	else
-		vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
-
-	memset(&ctxt, 0, sizeof(ctxt));
-	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
-	ctxt.seid = vsi->seid;
+	int ret = -ENOTSUP;
+	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
+	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
 
-	hw = I40E_VSI_TO_HW(vsi);
-	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
-	if (ret != I40E_SUCCESS) {
-		ret = -ENOTSUP;
-		PMD_DRV_LOG(ERR, "Failed to update VSI params");
-	}
+	/* Init */
+	memset(&filter_replace, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+	memset(&filter_replace_buf, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+
+	/* create L1 filter */
+	filter_replace.old_filter_type =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
+	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+	filter_replace.tr_bit = 0;
+
+	/* Prepare the buffer, 2 entries */
+	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN;
+	filter_replace_buf.data[0] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	/* Field Vector 12b mask */
+	filter_replace_buf.data[2] = 0xff;
+	filter_replace_buf.data[3] = 0x0f;
+	filter_replace_buf.data[4] =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN;
+	filter_replace_buf.data[4] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	/* Field Vector 12b mask */
+	filter_replace_buf.data[6] = 0xff;
+	filter_replace_buf.data[7] = 0x0f;
+	ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+					    &filter_replace_buf);
+	if (ret != I40E_SUCCESS)
+		return ret;
+	/* Apply the second L2 cloud filter */
+	memset(&filter_replace, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+	memset(&filter_replace_buf, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+
+	/* create L2 filter, input for L2 filter will be L1 filter */
+	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
+	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
+	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+
+	/* Prepare the buffer, 2 entries */
+	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
+	filter_replace_buf.data[0] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+	filter_replace_buf.data[4] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+					    &filter_replace_buf);
 
 	return ret;
 }
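
With the two replace operations above in place, a QinQ cloud filter can be
requested through the consistent tunnel filter API. A sketch of such a
request (not part of this patch; field names follow the
i40e_tunnel_filter_conf usage earlier in the diff):

	#include <string.h>

	/* Steer outer VLAN 100 / inner VLAN 200 to PF queue 4. */
	static int
	example_add_qinq_filter(struct i40e_pf *pf)
	{
		struct i40e_tunnel_filter_conf conf;

		memset(&conf, 0, sizeof(conf));
		conf.tunnel_type = I40E_TUNNEL_TYPE_QINQ;
		conf.outer_vlan = 100;
		conf.inner_vlan = 200;
		conf.queue_id = 4;
		conf.is_to_vf = 0;	/* steer to the PF's own VSI */

		/* add = 1; the first call also triggers the L1/L2 replace */
		return i40e_dev_consistent_tunnel_filter_set(pf, &conf, 1);
	}
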
-static int
-i40e_add_rm_all_vlan_filter(struct i40e_vsi *vsi, uint8_t add)
-{
-	uint32_t j, k;
-	uint16_t vlan_id;
-	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
-	struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
-	int ret;
-
-	for (j = 0; j < I40E_VFTA_SIZE; j++) {
-		if (!vsi->vfta[j])
-			continue;
-
-		for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
-			if (!(vsi->vfta[j] & (1 << k)))
-				continue;
-
-			vlan_id = j * I40E_UINT32_BIT_SIZE + k;
-			if (!vlan_id)
-				continue;
-
-			vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
-			if (add)
-				ret = i40e_aq_add_vlan(hw, vsi->seid,
-						       &vlan_data, 1, NULL);
-			else
-				ret = i40e_aq_remove_vlan(hw, vsi->seid,
-							  &vlan_data, 1, NULL);
-			if (ret != I40E_SUCCESS) {
-				PMD_DRV_LOG(ERR,
-					    "Failed to add/rm vlan filter");
-				return ret;
-			}
-		}
-	}
-
-	return I40E_SUCCESS;
-}
-
-int
-rte_pmd_i40e_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf_id, uint8_t on)
-{
-	struct rte_eth_dev *dev;
-	struct i40e_pf *pf;
-	struct i40e_vsi *vsi;
-	struct i40e_hw *hw;
-	struct i40e_vsi_context ctxt;
-	int ret;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-
-	if (is_i40e_pmd(dev->data->drv_name))
-		return -ENOTSUP;
-
-	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-
-	if (vf_id >= pf->vf_num || !pf->vfs) {
-		PMD_DRV_LOG(ERR, "Invalid argument.");
-		return -EINVAL;
-	}
-
-	vsi = pf->vfs[vf_id].vsi;
-	if (!vsi) {
-		PMD_DRV_LOG(ERR, "Invalid VSI.");
-		return -EINVAL;
-	}
-
-	/* Check if it has been already on or off */
-	if (vsi->vlan_anti_spoof_on == on)
-		return 0; /* already on or off */
-
-	vsi->vlan_anti_spoof_on = on;
-	ret = i40e_add_rm_all_vlan_filter(vsi, on);
-	if (ret) {
-		PMD_DRV_LOG(ERR, "Failed to remove VLAN filters.");
-		return -ENOTSUP;
-	}
-
-	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
-	if (on)
-		vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
-	else
-		vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
-
-	memset(&ctxt, 0, sizeof(ctxt));
-	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
-	ctxt.seid = vsi->seid;
-
-	hw = I40E_VSI_TO_HW(vsi);
-	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
-	if (ret != I40E_SUCCESS) {
-		ret = -ENOTSUP;
-		PMD_DRV_LOG(ERR, "Failed to update VSI params");
-	}
-
-	return ret;
-}
-
-static int
-i40e_vsi_rm_mac_filter(struct i40e_vsi *vsi)
-{
-	struct i40e_mac_filter *f;
-	struct i40e_macvlan_filter *mv_f;
-	int i, vlan_num;
-	enum rte_mac_filter_type filter_type;
-	int ret = I40E_SUCCESS;
-	void *temp;
-
-	/* remove all the MACs */
-	TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
-		vlan_num = vsi->vlan_num;
-		filter_type = f->mac_info.filter_type;
-		if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
-		    filter_type == RTE_MACVLAN_HASH_MATCH) {
-			if (vlan_num == 0) {
-				PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
-				return I40E_ERR_PARAM;
-			}
-		} else if (filter_type == RTE_MAC_PERFECT_MATCH ||
-			   filter_type == RTE_MAC_HASH_MATCH)
-			vlan_num = 1;
-
-		mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
-		if (!mv_f) {
-			PMD_DRV_LOG(ERR, "failed to allocate memory");
-			return I40E_ERR_NO_MEMORY;
-		}
-
-		for (i = 0; i < vlan_num; i++) {
-			mv_f[i].filter_type = filter_type;
-			(void)rte_memcpy(&mv_f[i].macaddr,
-					 &f->mac_info.mac_addr,
-					 ETH_ADDR_LEN);
-		}
-		if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
-		    filter_type == RTE_MACVLAN_HASH_MATCH) {
-			ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
-							 &f->mac_info.mac_addr);
-			if (ret != I40E_SUCCESS) {
-				rte_free(mv_f);
-				return ret;
-			}
-		}
-
-		ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
-		if (ret != I40E_SUCCESS) {
-			rte_free(mv_f);
-			return ret;
-		}
-
-		rte_free(mv_f);
-		ret = I40E_SUCCESS;
-	}
-
-	return ret;
-}
-
-static int
-i40e_vsi_restore_mac_filter(struct i40e_vsi *vsi)
-{
-	struct i40e_mac_filter *f;
-	struct i40e_macvlan_filter *mv_f;
-	int i, vlan_num = 0;
-	int ret = I40E_SUCCESS;
-	void *temp;
-
-	/* restore all the MACs */
-	TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
-		if ((f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
-		    (f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH)) {
-			/**
-			 * If vlan_num is 0, that's the first time to add mac,
-			 * set mask for vlan_id 0.
-			 */
-			if (vsi->vlan_num == 0) {
-				i40e_set_vlan_filter(vsi, 0, 1);
-				vsi->vlan_num = 1;
-			}
-			vlan_num = vsi->vlan_num;
-		} else if ((f->mac_info.filter_type == RTE_MAC_PERFECT_MATCH) ||
-			   (f->mac_info.filter_type == RTE_MAC_HASH_MATCH))
-			vlan_num = 1;
-
-		mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
-		if (!mv_f) {
-			PMD_DRV_LOG(ERR, "failed to allocate memory");
-			return I40E_ERR_NO_MEMORY;
-		}
-
-		for (i = 0; i < vlan_num; i++) {
-			mv_f[i].filter_type = f->mac_info.filter_type;
-			(void)rte_memcpy(&mv_f[i].macaddr,
-					 &f->mac_info.mac_addr,
-					 ETH_ADDR_LEN);
-		}
-
-		if (f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH ||
-		    f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH) {
-			ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
-							 &f->mac_info.mac_addr);
-			if (ret != I40E_SUCCESS) {
-				rte_free(mv_f);
-				return ret;
-			}
-		}
-
-		ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
-		if (ret != I40E_SUCCESS) {
-			rte_free(mv_f);
-			return ret;
-		}
-
-		rte_free(mv_f);
-		ret = I40E_SUCCESS;
-	}
-
-	return ret;
-}
-
-static int
-i40e_vsi_set_tx_loopback(struct i40e_vsi *vsi, uint8_t on)
-{
-	struct i40e_vsi_context ctxt;
-	struct i40e_hw *hw;
-	int ret;
-
-	if (!vsi)
-		return -EINVAL;
-
-	hw = I40E_VSI_TO_HW(vsi);
-
-	/* Use the FW API if FW >= v5.0 */
-	if (hw->aq.fw_maj_ver < 5) {
-		PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
-		return -ENOTSUP;
-	}
-
-	/* Check if it has been already on or off */
-	if (vsi->info.valid_sections &
-	    rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID)) {
-		if (on) {
-			if ((vsi->info.switch_id &
-			     I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) ==
-			    I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB)
-				return 0; /* already on */
-		} else {
-			if ((vsi->info.switch_id &
-			     I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) == 0)
-				return 0; /* already off */
-		}
-	}
-
-	/* remove all the MAC and VLAN first */
-	ret = i40e_vsi_rm_mac_filter(vsi);
-	if (ret) {
-		PMD_INIT_LOG(ERR, "Failed to remove MAC filters.");
-		return ret;
-	}
-	if (vsi->vlan_anti_spoof_on) {
-		ret = i40e_add_rm_all_vlan_filter(vsi, 0);
-		if (ret) {
-			PMD_INIT_LOG(ERR, "Failed to remove VLAN filters.");
-			return ret;
-		}
-	}
-
-	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
-	if (on)
-		vsi->info.switch_id |= I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
-	else
-		vsi->info.switch_id &= ~I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
-
-	memset(&ctxt, 0, sizeof(ctxt));
-	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
-	ctxt.seid = vsi->seid;
-
-	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
-	if (ret != I40E_SUCCESS) {
-		PMD_DRV_LOG(ERR, "Failed to update VSI params");
-		return ret;
-	}
-
-	/* add all the MAC and VLAN back */
-	ret = i40e_vsi_restore_mac_filter(vsi);
-	if (ret)
-		return ret;
-	if (vsi->vlan_anti_spoof_on) {
-		ret = i40e_add_rm_all_vlan_filter(vsi, 1);
-		if (ret)
-			return ret;
-	}
-
-	return ret;
-}
-
-int
-rte_pmd_i40e_set_tx_loopback(uint8_t port, uint8_t on)
-{
-	struct rte_eth_dev *dev;
-	struct i40e_pf *pf;
-	struct i40e_pf_vf *vf;
-	struct i40e_vsi *vsi;
-	uint16_t vf_id;
-	int ret;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-
-	if (is_i40e_pmd(dev->data->drv_name))
-		return -ENOTSUP;
-
-	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-
-	/* setup PF TX loopback */
-	vsi = pf->main_vsi;
-	ret = i40e_vsi_set_tx_loopback(vsi, on);
-	if (ret)
-		return -ENOTSUP;
-
-	/* setup TX loopback for all the VFs */
-	if (!pf->vfs) {
-		/* if no VF, do nothing. */
-		return 0;
-	}
-
-	for (vf_id = 0; vf_id < pf->vf_num; vf_id++) {
-		vf = &pf->vfs[vf_id];
-		vsi = vf->vsi;
-
-		ret = i40e_vsi_set_tx_loopback(vsi, on);
-		if (ret)
-			return -ENOTSUP;
-	}
-
-	return ret;
-}
-
-int
-rte_pmd_i40e_set_vf_unicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on)
-{
-	struct rte_eth_dev *dev;
-	struct i40e_pf *pf;
-	struct i40e_vsi *vsi;
-	struct i40e_hw *hw;
-	int ret;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-
-	if (is_i40e_pmd(dev->data->drv_name))
-		return -ENOTSUP;
-
-	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-
-	if (vf_id >= pf->vf_num || !pf->vfs) {
-		PMD_DRV_LOG(ERR, "Invalid argument.");
-		return -EINVAL;
-	}
-
-	vsi = pf->vfs[vf_id].vsi;
-	if (!vsi) {
-		PMD_DRV_LOG(ERR, "Invalid VSI.");
-		return -EINVAL;
-	}
-
-	hw = I40E_VSI_TO_HW(vsi);
-
-	ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
-						  on, NULL, true);
-	if (ret != I40E_SUCCESS) {
-		ret = -ENOTSUP;
-		PMD_DRV_LOG(ERR, "Failed to set unicast promiscuous mode");
-	}
-
-	return ret;
-}
-
-int
-rte_pmd_i40e_set_vf_multicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on)
-{
-	struct rte_eth_dev *dev;
-	struct i40e_pf *pf;
-	struct i40e_vsi *vsi;
-	struct i40e_hw *hw;
-	int ret;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-
-	if (is_i40e_pmd(dev->data->drv_name))
-		return -ENOTSUP;
-
-	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-
-	if (vf_id >= pf->vf_num || !pf->vfs) {
-		PMD_DRV_LOG(ERR, "Invalid argument.");
-		return -EINVAL;
-	}
-
-	vsi = pf->vfs[vf_id].vsi;
-	if (!vsi) {
-		PMD_DRV_LOG(ERR, "Invalid VSI.");
-		return -EINVAL;
-	}
-
-	hw = I40E_VSI_TO_HW(vsi);
-
-	ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
-						    on, NULL);
-	if (ret != I40E_SUCCESS) {
-		ret = -ENOTSUP;
-		PMD_DRV_LOG(ERR, "Failed to set multicast promiscuous mode");
-	}
-
-	return ret;
-}
-
-int
-rte_pmd_i40e_set_vf_mac_addr(uint8_t port, uint16_t vf_id,
-			     struct ether_addr *mac_addr)
-{
-	struct i40e_mac_filter *f;
-	struct rte_eth_dev *dev;
-	struct i40e_pf_vf *vf;
-	struct i40e_vsi *vsi;
-	struct i40e_pf *pf;
-	void *temp;
-
-	if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
-		return -EINVAL;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-
-	if (is_i40e_pmd(dev->data->drv_name))
-		return -ENOTSUP;
-
-	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-
-	if (vf_id >= pf->vf_num || !pf->vfs)
-		return -EINVAL;
-
-	vf = &pf->vfs[vf_id];
-	vsi = vf->vsi;
-	if (!vsi) {
-		PMD_DRV_LOG(ERR, "Invalid VSI.");
-		return -EINVAL;
-	}
-
-	ether_addr_copy(mac_addr, &vf->mac_addr);
-
-	/* Remove all existing mac */
-	TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
-		i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
-
-	return 0;
-}
-
-/* Set vlan strip on/off for specific VF from host */
-int
-rte_pmd_i40e_set_vf_vlan_stripq(uint8_t port, uint16_t vf_id, uint8_t on)
-{
-	struct rte_eth_dev *dev;
-	struct i40e_pf *pf;
-	struct i40e_vsi *vsi;
-	int ret;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-
-	if (is_i40e_pmd(dev->data->drv_name))
-		return -ENOTSUP;
-
-	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-
-	if (vf_id >= pf->vf_num || !pf->vfs) {
-		PMD_DRV_LOG(ERR, "Invalid argument.");
-		return -EINVAL;
-	}
-
-	vsi = pf->vfs[vf_id].vsi;
-
-	if (!vsi)
-		return -EINVAL;
-
-	ret = i40e_vsi_config_vlan_stripping(vsi, !!on);
-	if (ret != I40E_SUCCESS) {
-		ret = -ENOTSUP;
-		PMD_DRV_LOG(ERR, "Failed to set VLAN stripping!");
-	}
-
-	return ret;
-}
-
-int rte_pmd_i40e_set_vf_vlan_insert(uint8_t port, uint16_t vf_id,
-				    uint16_t vlan_id)
-{
-	struct rte_eth_dev *dev;
-	struct i40e_pf *pf;
-	struct i40e_hw *hw;
-	struct i40e_vsi *vsi;
-	struct i40e_vsi_context ctxt;
-	int ret;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	if (vlan_id > ETHER_MAX_VLAN_ID) {
-		PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
-		return -EINVAL;
-	}
-
-	dev = &rte_eth_devices[port];
-
-	if (is_i40e_pmd(dev->data->drv_name))
-		return -ENOTSUP;
-
-	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-	hw = I40E_PF_TO_HW(pf);
-
-	/**
-	 * return -ENODEV if SRIOV not enabled, VF number not configured
-	 * or no queue assigned.
-	 */
-	if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
-	    pf->vf_nb_qps == 0)
-		return -ENODEV;
-
-	if (vf_id >= pf->vf_num || !pf->vfs) {
-		PMD_DRV_LOG(ERR, "Invalid VF ID.");
-		return -EINVAL;
-	}
-
-	vsi = pf->vfs[vf_id].vsi;
-	if (!vsi) {
-		PMD_DRV_LOG(ERR, "Invalid VSI.");
-		return -EINVAL;
-	}
-
-	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
-	vsi->info.pvid = vlan_id;
-	if (vlan_id > 0)
-		vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID;
-	else
-		vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_INSERT_PVID;
-
-	memset(&ctxt, 0, sizeof(ctxt));
-	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
-	ctxt.seid = vsi->seid;
-
-	hw = I40E_VSI_TO_HW(vsi);
-	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
-	if (ret != I40E_SUCCESS) {
-		ret = -ENOTSUP;
-		PMD_DRV_LOG(ERR, "Failed to update VSI params");
-	}
-
-	return ret;
-}
-
-int rte_pmd_i40e_set_vf_broadcast(uint8_t port, uint16_t vf_id,
-				  uint8_t on)
-{
-	struct rte_eth_dev *dev;
-	struct i40e_pf *pf;
-	struct i40e_vsi *vsi;
-	struct i40e_hw *hw;
-	int ret;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	if (on > 1) {
-		PMD_DRV_LOG(ERR, "on should be 0 or 1.");
-		return -EINVAL;
-	}
-
-	dev = &rte_eth_devices[port];
-
-	if (is_i40e_pmd(dev->data->drv_name))
-		return -ENOTSUP;
-
-	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-	hw = I40E_PF_TO_HW(pf);
-
-	if (vf_id >= pf->vf_num || !pf->vfs) {
-		PMD_DRV_LOG(ERR, "Invalid VF ID.");
-		return -EINVAL;
-	}
-
-	/**
-	 * return -ENODEV if SRIOV not enabled, VF number not configured
-	 * or no queue assigned.
-	 */
-	if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
-	    pf->vf_nb_qps == 0) {
-		PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
-		return -ENODEV;
-	}
-
-	vsi = pf->vfs[vf_id].vsi;
-	if (!vsi) {
-		PMD_DRV_LOG(ERR, "Invalid VSI.");
-		return -EINVAL;
-	}
-
-	hw = I40E_VSI_TO_HW(vsi);
-
-	ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, on, NULL);
-	if (ret != I40E_SUCCESS) {
-		ret = -ENOTSUP;
-		PMD_DRV_LOG(ERR, "Failed to set VSI broadcast");
-	}
-
-	return ret;
-}
-
-int rte_pmd_i40e_set_vf_vlan_tag(uint8_t port, uint16_t vf_id, uint8_t on)
-{
-	struct rte_eth_dev *dev;
-	struct i40e_pf *pf;
-	struct i40e_hw *hw;
-	struct i40e_vsi *vsi;
-	struct i40e_vsi_context ctxt;
-	int ret;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	if (on > 1) {
-		PMD_DRV_LOG(ERR, "on should be 0 or 1.");
-		return -EINVAL;
-	}
-
-	dev = &rte_eth_devices[port];
-
-	if (is_i40e_pmd(dev->data->drv_name))
-		return -ENOTSUP;
-
-	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-	hw = I40E_PF_TO_HW(pf);
-
-	/**
-	 * return -ENODEV if SRIOV not enabled, VF number not configured
-	 * or no queue assigned.
-	 */
-	if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
-	    pf->vf_nb_qps == 0) {
-		PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
-		return -ENODEV;
-	}
-
-	if (vf_id >= pf->vf_num || !pf->vfs) {
-		PMD_DRV_LOG(ERR, "Invalid VF ID.");
-		return -EINVAL;
-	}
-
-	vsi = pf->vfs[vf_id].vsi;
-	if (!vsi) {
-		PMD_DRV_LOG(ERR, "Invalid VSI.");
-		return -EINVAL;
-	}
-
-	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
-	if (on) {
-		vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
-		vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
-	} else {
-		vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
-		vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_TAGGED;
-	}
-
-	memset(&ctxt, 0, sizeof(ctxt));
-	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
-	ctxt.seid = vsi->seid;
-
-	hw = I40E_VSI_TO_HW(vsi);
-	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
-	if (ret != I40E_SUCCESS) {
-		ret = -ENOTSUP;
-		PMD_DRV_LOG(ERR, "Failed to update VSI params");
-	}
-
-	return ret;
-}
-
-int rte_pmd_i40e_set_vf_vlan_filter(uint8_t port, uint16_t vlan_id,
-				    uint64_t vf_mask, uint8_t on)
-{
-	struct rte_eth_dev *dev;
-	struct i40e_pf *pf;
-	struct i40e_hw *hw;
-	uint16_t vf_idx;
-	int ret = I40E_SUCCESS;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-
-	if (is_i40e_pmd(dev->data->drv_name))
-		return -ENOTSUP;
-
-	if (vlan_id > ETHER_MAX_VLAN_ID) {
-		PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
-		return -EINVAL;
-	}
-
-	if (vf_mask == 0) {
-		PMD_DRV_LOG(ERR, "No VF.");
-		return -EINVAL;
-	}
-
-	if (on > 1) {
-		PMD_DRV_LOG(ERR, "on is should be 0 or 1.");
-		return -EINVAL;
-	}
-
-	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-	hw = I40E_PF_TO_HW(pf);
-
-	/**
-	 * return -ENODEV if SRIOV not enabled, VF number not configured
-	 * or no queue assigned.
-	 */
-	if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
-	    pf->vf_nb_qps == 0) {
-		PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
-		return -ENODEV;
-	}
-
-	for (vf_idx = 0; vf_idx < 64 && ret == I40E_SUCCESS; vf_idx++) {
-		if (vf_mask & ((uint64_t)(1ULL << vf_idx))) {
-			if (on)
-				ret = i40e_vsi_add_vlan(pf->vfs[vf_idx].vsi,
-							vlan_id);
-			else
-				ret = i40e_vsi_delete_vlan(pf->vfs[vf_idx].vsi,
-							   vlan_id);
-		}
-	}
-
-	if (ret != I40E_SUCCESS) {
-		ret = -ENOTSUP;
-		PMD_DRV_LOG(ERR, "Failed to set VF VLAN filter, on = %d", on);
-	}
-
-	return ret;
-}
-
-int
-rte_pmd_i40e_get_vf_stats(uint8_t port,
-			  uint16_t vf_id,
-			  struct rte_eth_stats *stats)
-{
-	struct rte_eth_dev *dev;
-	struct i40e_pf *pf;
-	struct i40e_vsi *vsi;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-
-	if (is_i40e_pmd(dev->data->drv_name))
-		return -ENOTSUP;
-
-	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-
-	if (vf_id >= pf->vf_num || !pf->vfs) {
-		PMD_DRV_LOG(ERR, "Invalid VF ID.");
-		return -EINVAL;
-	}
-
-	vsi = pf->vfs[vf_id].vsi;
-	if (!vsi) {
-		PMD_DRV_LOG(ERR, "Invalid VSI.");
-		return -EINVAL;
-	}
-
-	i40e_update_vsi_stats(vsi);
-
-	stats->ipackets = vsi->eth_stats.rx_unicast +
-			vsi->eth_stats.rx_multicast +
-			vsi->eth_stats.rx_broadcast;
-	stats->opackets = vsi->eth_stats.tx_unicast +
-			vsi->eth_stats.tx_multicast +
-			vsi->eth_stats.tx_broadcast;
-	stats->ibytes = vsi->eth_stats.rx_bytes;
-	stats->obytes = vsi->eth_stats.tx_bytes;
-	stats->ierrors = vsi->eth_stats.rx_discards;
-	stats->oerrors = vsi->eth_stats.tx_errors + vsi->eth_stats.tx_discards;
-
-	return 0;
-}
-
-int
-rte_pmd_i40e_reset_vf_stats(uint8_t port,
-			    uint16_t vf_id)
-{
-	struct rte_eth_dev *dev;
-	struct i40e_pf *pf;
-	struct i40e_vsi *vsi;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-
-	if (is_i40e_pmd(dev->data->drv_name))
-		return -ENOTSUP;
-
-	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-
-	if (vf_id >= pf->vf_num || !pf->vfs) {
-		PMD_DRV_LOG(ERR, "Invalid VF ID.");
-		return -EINVAL;
-	}
-
-	vsi = pf->vfs[vf_id].vsi;
-	if (!vsi) {
-		PMD_DRV_LOG(ERR, "Invalid VSI.");
-		return -EINVAL;
-	}
-
-	vsi->offset_loaded = false;
-	i40e_update_vsi_stats(vsi);
-
-	return 0;
+RTE_INIT(i40e_init_log);
+static void
+i40e_init_log(void)
+{
+	i40e_logtype_init = rte_log_register("pmd.i40e.init");
+	if (i40e_logtype_init >= 0)
+		rte_log_set_level(i40e_logtype_init, RTE_LOG_NOTICE);
+	i40e_logtype_driver = rte_log_register("pmd.i40e.driver");
+	if (i40e_logtype_driver >= 0)
+		rte_log_set_level(i40e_logtype_driver, RTE_LOG_NOTICE);
 }
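
The RTE_INIT constructor above registers the two dynamic log types at
NOTICE level. An application can raise them at runtime, for example (a
sketch assuming the exported symbol is reachable, e.g. through an extern
declaration):

	extern int i40e_logtype_driver;	/* registered as "pmd.i40e.driver" */

	static void
	enable_i40e_debug_logs(void)
	{
		if (i40e_logtype_driver >= 0)
			rte_log_set_level(i40e_logtype_driver, RTE_LOG_DEBUG);
	}
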