diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 6d09bba37a..720f067d6e 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -1,7 +1,7 @@
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
  *   All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
@@ -40,10 +40,12 @@
 #include
 #include
+#include
 #include
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -72,7 +74,7 @@
 /* Maximun number of capability elements */
 #define I40E_MAX_CAP_ELE_NUM       128

-/* Wait count and inteval */
+/* Wait count and interval */
 #define I40E_CHK_Q_ENA_COUNT       1000
 #define I40E_CHK_Q_ENA_INTERVAL_US 1000

@@ -84,12 +86,6 @@
 /* Flow control default timer */
 #define I40E_DEFAULT_PAUSE_TIME  0xFFFFU

-/* Flow control default high water */
-#define I40E_DEFAULT_HIGH_WATER  (0x1C40/1024)
-
-/* Flow control default low water */
-#define I40E_DEFAULT_LOW_WATER   (0x1A40/1024)
-
 /* Flow control enable fwd bit */
 #define I40E_PRTMAC_FWD_CTRL  0x00000001

@@ -99,6 +95,12 @@
 /* Kilobytes shift */
 #define I40E_KILOSHIFT 10

+/* Flow control default high water */
+#define I40E_DEFAULT_HIGH_WATER (0xF2000 >> I40E_KILOSHIFT)
+
+/* Flow control default low water */
+#define I40E_DEFAULT_LOW_WATER  (0xF2000 >> I40E_KILOSHIFT)
+
 /* Receive Average Packet Size in Byte*/
 #define I40E_PACKET_AVERAGE_SIZE 128

@@ -248,6 +250,7 @@ static int i40e_dev_configure(struct rte_eth_dev *dev);
 static int i40e_dev_start(struct rte_eth_dev *dev);
 static void i40e_dev_stop(struct rte_eth_dev *dev);
 static void i40e_dev_close(struct rte_eth_dev *dev);
+static int i40e_dev_reset(struct rte_eth_dev *dev);
 static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
 static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
 static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
@@ -289,10 +292,10 @@ static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
 			      struct rte_eth_fc_conf *fc_conf);
 static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
 				       struct rte_eth_pfc_conf *pfc_conf);
-static void i40e_macaddr_add(struct rte_eth_dev *dev,
-			     struct ether_addr *mac_addr,
-			     uint32_t index,
-			     uint32_t pool);
+static int i40e_macaddr_add(struct rte_eth_dev *dev,
+			    struct ether_addr *mac_addr,
+			    uint32_t index,
+			    uint32_t pool);
 static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
 static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
 				    struct rte_eth_rss_reta_entry64 *reta_conf,
@@ -317,8 +320,7 @@ static void i40e_stat_update_48(struct i40e_hw *hw,
 			       uint64_t *offset,
 			       uint64_t *stat);
 static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
-static void i40e_dev_interrupt_handler(struct rte_intr_handle *handle,
-				       void *param);
+static void i40e_dev_interrupt_handler(void *param);
 static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
 				uint32_t base, uint32_t num);
 static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
@@ -332,10 +334,6 @@ static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
 				       struct i40e_vsi *vsi);
 static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
 static int
i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on); -static inline int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi, - struct i40e_macvlan_filter *mv_f, - int num, - struct ether_addr *addr); static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi, struct i40e_macvlan_filter *mv_f, int num, @@ -409,14 +407,19 @@ static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf, struct i40e_ethertype_filter *filter); static int i40e_tunnel_filter_convert( - struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter, + struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter, struct i40e_tunnel_filter *tunnel_filter); static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf, struct i40e_tunnel_filter *tunnel_filter); +static int i40e_cloud_filter_qinq_create(struct i40e_pf *pf); static void i40e_ethertype_filter_restore(struct i40e_pf *pf); static void i40e_tunnel_filter_restore(struct i40e_pf *pf); static void i40e_filter_restore(struct i40e_pf *pf); +static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev); + +int i40e_logtype_init; +int i40e_logtype_driver; static const struct rte_pci_id pci_id_i40e_map[] = { { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) }, @@ -447,6 +450,7 @@ static const struct eth_dev_ops i40e_eth_dev_ops = { .dev_start = i40e_dev_start, .dev_stop = i40e_dev_stop, .dev_close = i40e_dev_close, + .dev_reset = i40e_dev_reset, .promiscuous_enable = i40e_dev_promiscuous_enable, .promiscuous_disable = i40e_dev_promiscuous_disable, .allmulticast_enable = i40e_dev_allmulticast_enable, @@ -478,6 +482,8 @@ static const struct eth_dev_ops i40e_eth_dev_ops = { .rx_queue_release = i40e_dev_rx_queue_release, .rx_queue_count = i40e_dev_rx_queue_count, .rx_descriptor_done = i40e_dev_rx_descriptor_done, + .rx_descriptor_status = i40e_dev_rx_descriptor_status, + .tx_descriptor_status = i40e_dev_tx_descriptor_status, .tx_queue_setup = i40e_dev_tx_queue_setup, .tx_queue_release = i40e_dev_tx_queue_release, .dev_led_on = i40e_dev_led_on, @@ -511,6 +517,7 @@ static const struct eth_dev_ops i40e_eth_dev_ops = { .get_eeprom = i40e_get_eeprom, .mac_addr_set = i40e_set_default_mac_addr, .mtu_set = i40e_dev_mtu_set, + .tm_ops_get = i40e_tm_ops_get, }; /* store statistics names and its offset in stats structure */ @@ -626,16 +633,23 @@ static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = { #define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \ sizeof(rte_i40e_txq_prio_strings[0])) -static struct eth_driver rte_i40e_pmd = { - .pci_drv = { - .id_table = pci_id_i40e_map, - .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, - .probe = rte_eth_dev_pci_probe, - .remove = rte_eth_dev_pci_remove, - }, - .eth_dev_init = eth_i40e_dev_init, - .eth_dev_uninit = eth_i40e_dev_uninit, - .dev_private_size = sizeof(struct i40e_adapter), +static int eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_probe(pci_dev, + sizeof(struct i40e_adapter), eth_i40e_dev_init); +} + +static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_remove(pci_dev, eth_i40e_dev_uninit); +} + +static struct rte_pci_driver rte_i40e_pmd = { + .id_table = pci_id_i40e_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, + .probe = eth_i40e_pci_probe, + .remove = eth_i40e_pci_remove, }; static inline int @@ -666,9 +680,9 @@ rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev, return 0; } 
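[Illustrative aside, not part of the diff] The hunk above removes the old eth_driver wrapper and registers a plain rte_pci_driver whose probe/remove callbacks delegate to the generic ethdev-PCI helpers, which is the scheme i40e adopts in this commit. A minimal sketch of that pattern for a hypothetical "foo" PMD (all foo_* names are placeholders, not DPDK APIs) could look like:

	#include <rte_ethdev_pci.h>

	struct foo_adapter { int dummy; };      /* assumed per-port private data */

	static int foo_dev_init(struct rte_eth_dev *dev)   { (void)dev; return 0; }
	static int foo_dev_uninit(struct rte_eth_dev *dev) { (void)dev; return 0; }

	static const struct rte_pci_id foo_pci_id_map[] = {
		{ .vendor_id = 0 },             /* sentinel entry ends the table */
	};

	static int foo_pci_probe(struct rte_pci_driver *drv __rte_unused,
				 struct rte_pci_device *pci_dev)
	{
		/* allocate the ethdev plus private data, then run the PMD init hook */
		return rte_eth_dev_pci_generic_probe(pci_dev,
				sizeof(struct foo_adapter), foo_dev_init);
	}

	static int foo_pci_remove(struct rte_pci_device *pci_dev)
	{
		/* run the PMD uninit hook and release the ethdev */
		return rte_eth_dev_pci_generic_remove(pci_dev, foo_dev_uninit);
	}

	static struct rte_pci_driver foo_pmd = {
		.id_table  = foo_pci_id_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
		.probe     = foo_pci_probe,
		.remove    = foo_pci_remove,
	};
	RTE_PMD_REGISTER_PCI(net_foo, foo_pmd);
	RTE_PMD_REGISTER_PCI_TABLE(net_foo, foo_pci_id_map);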
-RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd.pci_drv); +RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd); RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map); -RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio"); +RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio-pci"); #ifndef I40E_GLQF_ORT #define I40E_GLQF_ORT(_i) (0x00268900 + ((_i) * 4)) @@ -676,6 +690,9 @@ RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio"); #ifndef I40E_GLQF_PIT #define I40E_GLQF_PIT(_i) (0x00268C80 + ((_i) * 4)) #endif +#ifndef I40E_GLQF_L3_MAP +#define I40E_GLQF_L3_MAP(_i) (0x0026C700 + ((_i) * 4)) +#endif static inline void i40e_GLQF_reg_init(struct i40e_hw *hw) { @@ -721,8 +738,8 @@ i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf) pf->main_vsi_seid, 0, TRUE, NULL, NULL); if (ret) - PMD_INIT_LOG(ERR, "Failed to add filter to drop flow control " - " frames from VSIs."); + PMD_INIT_LOG(ERR, + "Failed to add filter to drop flow control frames from VSIs."); } static int @@ -865,7 +882,7 @@ is_floating_veb_supported(struct rte_devargs *devargs) static void config_floating_veb(struct rte_eth_dev *dev) { - struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -898,12 +915,14 @@ i40e_init_ethtype_filter_list(struct rte_eth_dev *dev) .entries = I40E_MAX_ETHERTYPE_FILTER_NUM, .key_len = sizeof(struct i40e_ethertype_filter_input), .hash_func = rte_hash_crc, + .hash_func_init_val = 0, + .socket_id = rte_socket_id(), }; /* Initialize ethertype filter rule list and hash */ TAILQ_INIT(ðertype_rule->ethertype_list); snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE, - "ethertype_%s", dev->data->name); + "ethertype_%s", dev->device->name); ethertype_rule->hash_table = rte_hash_create(ðertype_hash_params); if (!ethertype_rule->hash_table) { PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!"); @@ -941,12 +960,14 @@ i40e_init_tunnel_filter_list(struct rte_eth_dev *dev) .entries = I40E_MAX_TUNNEL_FILTER_NUM, .key_len = sizeof(struct i40e_tunnel_filter_input), .hash_func = rte_hash_crc, + .hash_func_init_val = 0, + .socket_id = rte_socket_id(), }; /* Initialize tunnel filter rule list and hash */ TAILQ_INIT(&tunnel_rule->tunnel_list); snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE, - "tunnel_%s", dev->data->name); + "tunnel_%s", dev->device->name); tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params); if (!tunnel_rule->hash_table) { PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!"); @@ -984,12 +1005,14 @@ i40e_init_fdir_filter_list(struct rte_eth_dev *dev) .entries = I40E_MAX_FDIR_FILTER_NUM, .key_len = sizeof(struct rte_eth_fdir_input), .hash_func = rte_hash_crc, + .hash_func_init_val = 0, + .socket_id = rte_socket_id(), }; /* Initialize flow director filter rule list and hash */ TAILQ_INIT(&fdir_info->fdir_list); snprintf(fdir_hash_name, RTE_HASH_NAMESIZE, - "fdir_%s", dev->data->name); + "fdir_%s", dev->device->name); fdir_info->hash_table = rte_hash_create(&fdir_hash_params); if (!fdir_info->hash_table) { PMD_INIT_LOG(ERR, "Failed to create fdir hash table!"); @@ -1040,11 +1063,12 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) i40e_set_tx_function(dev); return 0; } - pci_dev = I40E_DEV_TO_PCI(dev); + i40e_set_default_ptype_table(dev); + pci_dev = RTE_ETH_DEV_TO_PCI(dev); intr_handle = &pci_dev->intr_handle; rte_eth_copy_pci_info(dev, 
pci_dev); - dev->data->dev_flags = RTE_ETH_DEV_DETACHABLE; + dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE; pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); pf->adapter->eth_dev = dev; @@ -1053,8 +1077,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) hw->back = I40E_PF_TO_ADAPTER(pf); hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr); if (!hw->hw_addr) { - PMD_INIT_LOG(ERR, "Hardware is not available, " - "as address is NULL"); + PMD_INIT_LOG(ERR, + "Hardware is not available, as address is NULL"); return -ENODEV; } @@ -1111,15 +1135,18 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) ((hw->nvm.version >> 4) & 0xff), (hw->nvm.version & 0xf), hw->nvm.eetrack); + /* initialise the L3_MAP register */ + ret = i40e_aq_debug_write_register(hw, I40E_GLQF_L3_MAP(40), + 0x00000028, NULL); + if (ret) + PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d", ret); + /* Need the special FW version to support floating VEB */ config_floating_veb(dev); /* Clear PXE mode */ i40e_clear_pxe_mode(hw); - ret = i40e_dev_sync_phy_type(hw); - if (ret) { - PMD_INIT_LOG(ERR, "Failed to sync phy type: %d", ret); - goto err_sync_phy_type; - } + i40e_dev_sync_phy_type(hw); + /* * On X710, performance number is far from the expectation on recent * firmware versions. The fix for this issue may not be integrated in @@ -1190,8 +1217,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) /* Set the global registers with default ether type value */ ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER, ETHER_TYPE_VLAN); if (ret != I40E_SUCCESS) { - PMD_INIT_LOG(ERR, "Failed to set the default outer " - "VLAN ether type"); + PMD_INIT_LOG(ERR, + "Failed to set the default outer VLAN ether type"); goto err_setup_pf_switch; } @@ -1227,13 +1254,22 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) /* Should be after VSI initialized */ dev->data->mac_addrs = rte_zmalloc("i40e", len, 0); if (!dev->data->mac_addrs) { - PMD_INIT_LOG(ERR, "Failed to allocated memory " - "for storing mac address"); + PMD_INIT_LOG(ERR, + "Failed to allocated memory for storing mac address"); goto err_mac_alloc; } ether_addr_copy((struct ether_addr *)hw->mac.perm_addr, &dev->data->mac_addrs[0]); + /* Init dcb to sw mode by default */ + ret = i40e_dcb_init_configure(dev, TRUE); + if (ret != I40E_SUCCESS) { + PMD_INIT_LOG(INFO, "Failed to init dcb."); + pf->flags &= ~I40E_FLAG_DCB; + } + /* Update HW struct after DCB configuration */ + i40e_get_cap(hw); + /* initialize pf host driver to setup SRIOV resource if applicable */ i40e_pf_host_init(dev); @@ -1262,12 +1298,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) /* initialize mirror rule list */ TAILQ_INIT(&pf->mirror_list); - /* Init dcb to sw mode by default */ - ret = i40e_dcb_init_configure(dev, TRUE); - if (ret != I40E_SUCCESS) { - PMD_INIT_LOG(INFO, "Failed to init dcb."); - pf->flags &= ~I40E_FLAG_DCB; - } + /* initialize Traffic Manager configuration */ + i40e_tm_conf_init(dev); ret = i40e_init_ethtype_filter_list(dev); if (ret < 0) @@ -1302,7 +1334,6 @@ err_msix_pool_init: err_qp_pool_init: err_parameter_init: err_get_capabilities: -err_sync_phy_type: (void)i40e_shutdown_adminq(hw); return ret; @@ -1385,7 +1416,7 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev) pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - pci_dev = I40E_DEV_TO_PCI(dev); + pci_dev = RTE_ETH_DEV_TO_PCI(dev); intr_handle = &pci_dev->intr_handle; if (hw->adapter_stopped == 0) @@ -1432,6 +1463,9 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev) rte_free(p_flow); } + /* 
Remove all Traffic Manager configuration */ + i40e_tm_conf_uninit(dev); + return 0; } @@ -1441,9 +1475,14 @@ i40e_dev_configure(struct rte_eth_dev *dev) struct i40e_adapter *ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode; int i, ret; + ret = i40e_dev_sync_phy_type(hw); + if (ret) + return ret; + /* Initialize to TRUE. If any of Rx queues doesn't meet the * bulk allocation or vector Rx preconditions we will reset it. */ @@ -1518,7 +1557,7 @@ void i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi) { struct rte_eth_dev *dev = vsi->adapter->eth_dev; - struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); uint16_t msix_vect = vsi->msix_intr; @@ -1557,7 +1596,8 @@ i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi) static void __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect, - int base_queue, int nb_queue) + int base_queue, int nb_queue, + uint16_t itr_idx) { int i; uint32_t val; @@ -1566,7 +1606,7 @@ __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect, /* Bind all RX queues to allocated MSIX interrupt */ for (i = 0; i < nb_queue; i++) { val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | - I40E_QINT_RQCTL_ITR_INDX_MASK | + itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT | ((base_queue + i + 1) << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) | @@ -1629,10 +1669,10 @@ __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect, } void -i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi) +i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx) { struct rte_eth_dev *dev = vsi->adapter->eth_dev; - struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); uint16_t msix_vect = vsi->msix_intr; @@ -1657,7 +1697,8 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi) /* VF bind interrupt */ if (vsi->type == I40E_VSI_SRIOV) { __vsi_queues_bind_intr(vsi, msix_vect, - vsi->base_queue, vsi->nb_qps); + vsi->base_queue, vsi->nb_qps, + itr_idx); return; } @@ -1683,7 +1724,8 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi) /* no enough msix_vect, map all to one */ __vsi_queues_bind_intr(vsi, msix_vect, vsi->base_queue + i, - vsi->nb_used_qps - i); + vsi->nb_used_qps - i, + itr_idx); for (; !!record && i < vsi->nb_used_qps; i++) intr_handle->intr_vec[queue_idx + i] = msix_vect; @@ -1691,7 +1733,8 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi) } /* 1:1 queue/msix_vect mapping */ __vsi_queues_bind_intr(vsi, msix_vect, - vsi->base_queue + i, 1); + vsi->base_queue + i, 1, + itr_idx); if (!!record) intr_handle->intr_vec[queue_idx + i] = msix_vect; @@ -1704,7 +1747,7 @@ static void i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi) { struct rte_eth_dev *dev = vsi->adapter->eth_dev; - struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); uint16_t interval = i40e_calc_itr_interval(\ @@ -1736,7 +1779,7 @@ static void i40e_vsi_disable_queues_intr(struct i40e_vsi 
*vsi) { struct rte_eth_dev *dev = vsi->adapter->eth_dev; - struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); uint16_t msix_intr, i; @@ -1777,11 +1820,15 @@ i40e_parse_link_speeds(uint16_t link_speeds) static int i40e_phy_conf_link(struct i40e_hw *hw, uint8_t abilities, - uint8_t force_speed) + uint8_t force_speed, + bool is_up) { enum i40e_status_code status; struct i40e_aq_get_phy_abilities_resp phy_ab; struct i40e_aq_set_phy_config phy_conf; + enum i40e_aq_phy_type cnt; + uint32_t phy_type_mask = 0; + const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX | I40E_AQ_PHY_FLAG_PAUSE_RX | I40E_AQ_PHY_FLAG_PAUSE_RX | @@ -1799,6 +1846,10 @@ i40e_phy_conf_link(struct i40e_hw *hw, if (status) return ret; + /* If link already up, no need to set up again */ + if (is_up && phy_ab.phy_type != 0) + return I40E_SUCCESS; + memset(&phy_conf, 0, sizeof(phy_conf)); /* bits 0-2 use the values from get_phy_abilities_resp */ @@ -1809,13 +1860,21 @@ i40e_phy_conf_link(struct i40e_hw *hw, if (abilities & I40E_AQ_PHY_AN_ENABLED) phy_conf.link_speed = advt; else - phy_conf.link_speed = force_speed; + phy_conf.link_speed = is_up ? force_speed : phy_ab.link_speed; phy_conf.abilities = abilities; + + + /* To enable link, phy_type mask needs to include each type */ + for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_MAX; cnt++) + phy_type_mask |= 1 << cnt; + /* use get_phy_abilities_resp value for the rest */ - phy_conf.phy_type = phy_ab.phy_type; - phy_conf.phy_type_ext = phy_ab.phy_type_ext; + phy_conf.phy_type = is_up ? cpu_to_le32(phy_type_mask) : 0; + phy_conf.phy_type_ext = is_up ? (I40E_AQ_PHY_TYPE_EXT_25G_KR | + I40E_AQ_PHY_TYPE_EXT_25G_CR | I40E_AQ_PHY_TYPE_EXT_25G_SR | + I40E_AQ_PHY_TYPE_EXT_25G_LR) : 0; phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info; phy_conf.eee_capability = phy_ab.eee_capability; phy_conf.eeer = phy_ab.eeer_val; @@ -1847,13 +1906,7 @@ i40e_apply_link_speed(struct rte_eth_dev *dev) abilities |= I40E_AQ_PHY_AN_ENABLED; abilities |= I40E_AQ_PHY_LINK_ENABLED; - /* Skip changing speed on 40G interfaces, FW does not support */ - if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) { - speed = I40E_LINK_SPEED_UNKNOWN; - abilities |= I40E_AQ_PHY_AN_ENABLED; - } - - return i40e_phy_conf_link(hw, abilities, speed); + return i40e_phy_conf_link(hw, abilities, speed, true); } static int @@ -1863,9 +1916,10 @@ i40e_dev_start(struct rte_eth_dev *dev) struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct i40e_vsi *main_vsi = pf->main_vsi; int ret, i; - struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; uint32_t intr_vector = 0; + struct i40e_vsi *vsi; hw->adapter_stopped = 0; @@ -1892,8 +1946,9 @@ i40e_dev_start(struct rte_eth_dev *dev) dev->data->nb_rx_queues * sizeof(int), 0); if (!intr_handle->intr_vec) { - PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" - " intr_vec\n", dev->data->nb_rx_queues); + PMD_INIT_LOG(ERR, + "Failed to allocate %d rx_queues intr_vec", + dev->data->nb_rx_queues); return -ENOMEM; } } @@ -1908,19 +1963,21 @@ i40e_dev_start(struct rte_eth_dev *dev) /* Map queues with MSIX interrupt */ main_vsi->nb_used_qps = dev->data->nb_rx_queues - pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM; - i40e_vsi_queues_bind_intr(main_vsi); + i40e_vsi_queues_bind_intr(main_vsi, 
I40E_ITR_INDEX_DEFAULT); i40e_vsi_enable_queues_intr(main_vsi); /* Map VMDQ VSI queues with MSIX interrupt */ for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) { pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM; - i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi); + i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi, + I40E_ITR_INDEX_DEFAULT); i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi); } /* enable FDIR MSIX interrupt */ if (pf->fdir.fdir_vsi) { - i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi); + i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi, + I40E_ITR_INDEX_NONE); i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi); } @@ -1943,6 +2000,15 @@ i40e_dev_start(struct rte_eth_dev *dev) PMD_DRV_LOG(INFO, "fail to set vsi broadcast"); } + /* Enable the VLAN promiscuous mode. */ + if (pf->vfs) { + for (i = 0; i < pf->vf_num; i++) { + vsi = pf->vfs[i].vsi; + i40e_aq_set_vsi_vlan_promisc(hw, vsi->seid, + true, NULL); + } + } + /* Apply link configure */ if (dev->data->dev_conf.link_speeds & ~(ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G | @@ -1966,9 +2032,9 @@ i40e_dev_start(struct rte_eth_dev *dev) i40e_pf_enable_irq0(hw); if (dev->data->dev_conf.intr_conf.lsc != 0) - PMD_INIT_LOG(INFO, "lsc won't enable because of" - " no intr multiplex\n"); - } else if (dev->data->dev_conf.intr_conf.lsc != 0) { + PMD_INIT_LOG(INFO, + "lsc won't enable because of no intr multiplex"); + } else { ret = i40e_aq_set_phy_int_mask(hw, ~(I40E_AQ_EVENT_LINK_UPDOWN | I40E_AQ_EVENT_MODULE_QUAL_FAIL | @@ -1976,7 +2042,7 @@ i40e_dev_start(struct rte_eth_dev *dev) if (ret != I40E_SUCCESS) PMD_DRV_LOG(WARNING, "Fail to set phy mask"); - /* Call get_link_info aq commond to enable LSE */ + /* Call get_link_info aq commond to enable/disable LSE */ i40e_dev_link_update(dev, 0); } @@ -1985,6 +2051,11 @@ i40e_dev_start(struct rte_eth_dev *dev) i40e_filter_restore(pf); + if (pf->tm_conf.root && !pf->tm_conf.committed) + PMD_DRV_LOG(WARNING, + "please call hierarchy_commit() " + "before starting the port"); + return I40E_SUCCESS; err_up: @@ -1998,12 +2069,15 @@ static void i40e_dev_stop(struct rte_eth_dev *dev) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct i40e_vsi *main_vsi = pf->main_vsi; struct i40e_mirror_rule *p_mirror; - struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; int i; + if (hw->adapter_stopped == 1) + return; /* Disable all queues */ i40e_dev_switch_queues(pf, FALSE); @@ -2045,6 +2119,11 @@ i40e_dev_stop(struct rte_eth_dev *dev) rte_free(intr_handle->intr_vec); intr_handle->intr_vec = NULL; } + + /* reset hierarchy commit */ + pf->tm_conf.committed = false; + + hw->adapter_stopped = 1; } static void @@ -2052,7 +2131,7 @@ i40e_dev_close(struct rte_eth_dev *dev) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; uint32_t reg; int i; @@ -2060,7 +2139,6 @@ i40e_dev_close(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); i40e_dev_stop(dev); - hw->adapter_stopped = 1; i40e_dev_free_queues(dev); /* Disable interrupt */ @@ -2070,18 +2148,17 @@ i40e_dev_close(struct rte_eth_dev *dev) /* shutdown and destroy the HMC */ i40e_shutdown_lan_hmc(hw); 
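[Illustrative aside, not part of the diff] The dev_stop/dev_close hunks above add an hw->adapter_stopped guard at the top of i40e_dev_stop() and set the flag at its end, so stop becomes idempotent and close no longer has to set the flag itself. A minimal sketch of that guard pattern, using hypothetical foo_* names:

	struct foo_hw { int adapter_stopped; }; /* assumed private HW state */

	static void foo_dev_stop(struct foo_hw *hw)
	{
		if (hw->adapter_stopped == 1)
			return;                 /* already stopped, nothing to do */
		/* ... disable queues and interrupts, reset the TM commit flag ... */
		hw->adapter_stopped = 1;
	}

	static void foo_dev_close(struct foo_hw *hw)
	{
		foo_dev_stop(hw);               /* safe even if already stopped */
		/* ... free queues, shut down the admin queue and the HMC ... */
	}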
- /* release all the existing VSIs and VEBs */ - i40e_fdir_teardown(pf); - i40e_vsi_release(pf->main_vsi); - for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) { i40e_vsi_release(pf->vmdq[i].vsi); pf->vmdq[i].vsi = NULL; } - rte_free(pf->vmdq); pf->vmdq = NULL; + /* release all the existing VSIs and VEBs */ + i40e_fdir_teardown(pf); + i40e_vsi_release(pf->main_vsi); + /* shutdown the adminq */ i40e_aq_queue_shutdown(hw, true); i40e_shutdown_adminq(hw); @@ -2096,6 +2173,32 @@ i40e_dev_close(struct rte_eth_dev *dev) I40E_WRITE_FLUSH(hw); } +/* + * Reset PF device only to re-initialize resources in PMD layer + */ +static int +i40e_dev_reset(struct rte_eth_dev *dev) +{ + int ret; + + /* When a DPDK PMD PF begin to reset PF port, it should notify all + * its VF to make them align with it. The detailed notification + * mechanism is PMD specific. As to i40e PF, it is rather complex. + * To avoid unexpected behavior in VF, currently reset of PF with + * SR-IOV activation is not supported. It might be supported later. + */ + if (dev->data->sriov.active) + return -ENOTSUP; + + ret = eth_i40e_dev_uninit(dev); + if (ret) + return ret; + + ret = eth_i40e_dev_init(dev); + + return ret; +} + static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev) { @@ -2186,7 +2289,7 @@ i40e_dev_set_link_down(struct rte_eth_dev *dev) struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK; - return i40e_phy_conf_link(hw, abilities, speed); + return i40e_phy_conf_link(hw, abilities, speed, false); } int @@ -2219,11 +2322,11 @@ i40e_dev_link_update(struct rte_eth_dev *dev, } link.link_status = link_status.link_info & I40E_AQ_LINK_UP; - if (!wait_to_complete) + if (!wait_to_complete || link.link_status) break; rte_delay_ms(CHECK_INTERVAL); - } while (!link.link_status && rep_cnt--); + } while (--rep_cnt); if (!link.link_status) goto out; @@ -2264,6 +2367,8 @@ out: if (link.link_status == old.link_status) return -1; + i40e_notify_all_vfs_link_status(dev); + return 0; } @@ -2288,6 +2393,10 @@ i40e_update_vsi_stats(struct i40e_vsi *vsi) i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx), vsi->offset_loaded, &oes->rx_broadcast, &nes->rx_broadcast); + /* exclude CRC bytes */ + nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast + + nes->rx_broadcast) * ETHER_CRC_LEN; + i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded, &oes->rx_discards, &nes->rx_discards); /* GLV_REPC not supported */ @@ -2338,6 +2447,40 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw) struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */ struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */ + /* Get rx/tx bytes of internal transfer packets */ + i40e_stat_update_48(hw, I40E_GLV_GORCH(hw->port), + I40E_GLV_GORCL(hw->port), + pf->offset_loaded, + &pf->internal_stats_offset.rx_bytes, + &pf->internal_stats.rx_bytes); + + i40e_stat_update_48(hw, I40E_GLV_GOTCH(hw->port), + I40E_GLV_GOTCL(hw->port), + pf->offset_loaded, + &pf->internal_stats_offset.tx_bytes, + &pf->internal_stats.tx_bytes); + /* Get total internal rx packet count */ + i40e_stat_update_48(hw, I40E_GLV_UPRCH(hw->port), + I40E_GLV_UPRCL(hw->port), + pf->offset_loaded, + &pf->internal_stats_offset.rx_unicast, + &pf->internal_stats.rx_unicast); + i40e_stat_update_48(hw, I40E_GLV_MPRCH(hw->port), + I40E_GLV_MPRCL(hw->port), + pf->offset_loaded, + &pf->internal_stats_offset.rx_multicast, + &pf->internal_stats.rx_multicast); + i40e_stat_update_48(hw, I40E_GLV_BPRCH(hw->port), + 
I40E_GLV_BPRCL(hw->port), + pf->offset_loaded, + &pf->internal_stats_offset.rx_broadcast, + &pf->internal_stats.rx_broadcast); + + /* exclude CRC size */ + pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast + + pf->internal_stats.rx_multicast + + pf->internal_stats.rx_broadcast) * ETHER_CRC_LEN; + /* Get statistics of struct i40e_eth_stats */ i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port), I40E_GLPRT_GORCL(hw->port), @@ -2361,6 +2504,16 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw) ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast + ns->eth.rx_broadcast) * ETHER_CRC_LEN; + /* Workaround: it is possible I40E_GLV_GORCH[H/L] is updated before + * I40E_GLPRT_GORCH[H/L], so there is a small window that cause negtive + * value. + */ + if (ns->eth.rx_bytes < pf->internal_stats.rx_bytes) + ns->eth.rx_bytes = 0; + /* exlude internal rx bytes */ + else + ns->eth.rx_bytes -= pf->internal_stats.rx_bytes; + i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port), pf->offset_loaded, &os->eth.rx_discards, &ns->eth.rx_discards); @@ -2388,6 +2541,13 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw) &ns->eth.tx_broadcast); ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast + ns->eth.tx_broadcast) * ETHER_CRC_LEN; + + /* exclude internal tx bytes */ + if (ns->eth.tx_bytes < pf->internal_stats.tx_bytes) + ns->eth.tx_bytes = 0; + else + ns->eth.tx_bytes -= pf->internal_stats.tx_bytes; + /* GLPRT_TEPC not supported */ /* additional port specific stats */ @@ -2538,13 +2698,14 @@ i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) /* call read registers - updates values, now write them to struct */ i40e_read_stats_registers(pf, hw); - stats->ipackets = pf->main_vsi->eth_stats.rx_unicast + - pf->main_vsi->eth_stats.rx_multicast + - pf->main_vsi->eth_stats.rx_broadcast - + stats->ipackets = ns->eth.rx_unicast + + ns->eth.rx_multicast + + ns->eth.rx_broadcast - + ns->eth.rx_discards - pf->main_vsi->eth_stats.rx_discards; - stats->opackets = pf->main_vsi->eth_stats.tx_unicast + - pf->main_vsi->eth_stats.tx_multicast + - pf->main_vsi->eth_stats.tx_broadcast; + stats->opackets = ns->eth.tx_unicast + + ns->eth.tx_multicast + + ns->eth.tx_broadcast; stats->ibytes = ns->eth.rx_bytes; stats->obytes = ns->eth.tx_bytes; stats->oerrors = ns->eth.tx_errors + @@ -2808,7 +2969,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct i40e_vsi *vsi = pf->main_vsi; - struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); dev_info->pci_dev = pci_dev; dev_info->max_rx_queues = vsi->nb_qps; @@ -2912,67 +3073,93 @@ i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) } static int -i40e_vlan_tpid_set(struct rte_eth_dev *dev, - enum rte_vlan_type vlan_type, - uint16_t tpid) +i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev, + enum rte_vlan_type vlan_type, + uint16_t tpid, int qinq) { struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint64_t reg_r = 0, reg_w = 0; - uint16_t reg_id = 0; - int ret = 0; - int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend; + uint64_t reg_r = 0; + uint64_t reg_w = 0; + uint16_t reg_id = 3; + int ret; - switch (vlan_type) { - case ETH_VLAN_TYPE_OUTER: - if (qinq) + if (qinq) { + if (vlan_type == ETH_VLAN_TYPE_OUTER) reg_id = 2; - else - 
reg_id = 3; - break; - case ETH_VLAN_TYPE_INNER: - if (qinq) - reg_id = 3; - else { - ret = -EINVAL; - PMD_DRV_LOG(ERR, - "Unsupported vlan type in single vlan.\n"); - return ret; - } - break; - default: - ret = -EINVAL; - PMD_DRV_LOG(ERR, "Unsupported vlan type %d", vlan_type); - return ret; } + ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id), ®_r, NULL); if (ret != I40E_SUCCESS) { - PMD_DRV_LOG(ERR, "Fail to debug read from " - "I40E_GL_SWT_L2TAGCTRL[%d]", reg_id); - ret = -EIO; - return ret; + PMD_DRV_LOG(ERR, + "Fail to debug read from I40E_GL_SWT_L2TAGCTRL[%d]", + reg_id); + return -EIO; } - PMD_DRV_LOG(DEBUG, "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: " - "0x%08"PRIx64"", reg_id, reg_r); + PMD_DRV_LOG(DEBUG, + "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64, + reg_id, reg_r); reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK)); reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT); if (reg_r == reg_w) { - ret = 0; PMD_DRV_LOG(DEBUG, "No need to write"); - return ret; + return 0; } ret = i40e_aq_debug_write_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id), reg_w, NULL); if (ret != I40E_SUCCESS) { - ret = -EIO; - PMD_DRV_LOG(ERR, "Fail to debug write to " - "I40E_GL_SWT_L2TAGCTRL[%d]", reg_id); - return ret; + PMD_DRV_LOG(ERR, + "Fail to debug write to I40E_GL_SWT_L2TAGCTRL[%d]", + reg_id); + return -EIO; } - PMD_DRV_LOG(DEBUG, "Debug write 0x%08"PRIx64" to " - "I40E_GL_SWT_L2TAGCTRL[%d]", reg_w, reg_id); + PMD_DRV_LOG(DEBUG, + "Debug write 0x%08"PRIx64" to I40E_GL_SWT_L2TAGCTRL[%d]", + reg_w, reg_id); + + return 0; +} + +static int +i40e_vlan_tpid_set(struct rte_eth_dev *dev, + enum rte_vlan_type vlan_type, + uint16_t tpid) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend; + int ret = 0; + + if ((vlan_type != ETH_VLAN_TYPE_INNER && + vlan_type != ETH_VLAN_TYPE_OUTER) || + (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) { + PMD_DRV_LOG(ERR, + "Unsupported vlan type."); + return -EINVAL; + } + /* 802.1ad frames ability is added in NVM API 1.7*/ + if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) { + if (qinq) { + if (vlan_type == ETH_VLAN_TYPE_OUTER) + hw->first_tag = rte_cpu_to_le_16(tpid); + else if (vlan_type == ETH_VLAN_TYPE_INNER) + hw->second_tag = rte_cpu_to_le_16(tpid); + } else { + if (vlan_type == ETH_VLAN_TYPE_OUTER) + hw->second_tag = rte_cpu_to_le_16(tpid); + } + ret = i40e_aq_set_switch_config(hw, 0, 0, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, + "Set switch config failed aq_err: %d", + hw->aq.asq_last_status); + ret = -EIO; + } + } else + /* If NVM API < 1.7, keep the register setting */ + ret = i40e_vlan_tpid_set_by_registers(dev, vlan_type, + tpid, qinq); return ret; } @@ -3001,7 +3188,7 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask) if (mask & ETH_VLAN_EXTEND_MASK) { if (dev->data->dev_conf.rxmode.hw_vlan_extend) { i40e_vsi_config_double_vlan(vsi, TRUE); - /* Set global registers with default ether type value */ + /* Set global registers with default ethertype. 
*/ i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER, ETHER_TYPE_VLAN); i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER, @@ -3073,6 +3260,13 @@ i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); fc_conf->pause_time = pf->fc_conf.pause_time; + + /* read out from register, in case they are modified by other port */ + pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = + I40E_READ_REG(hw, I40E_GLRPB_GHW) >> I40E_KILOSHIFT; + pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = + I40E_READ_REG(hw, I40E_GLRPB_GLW) >> I40E_KILOSHIFT; + fc_conf->high_water = pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]; fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]; @@ -3116,8 +3310,9 @@ i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT; if ((fc_conf->high_water > max_high_water) || (fc_conf->high_water < fc_conf->low_water)) { - PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB, " - "High_water must <= %d.", max_high_water); + PMD_INIT_LOG(ERR, + "Invalid high/low water setup value in KB, High_water must be <= %d.", + max_high_water); return -EINVAL; } @@ -3221,7 +3416,7 @@ i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev, } /* Add a MAC address, and update filters */ -static void +static int i40e_macaddr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr, __rte_unused uint32_t index, @@ -3238,16 +3433,16 @@ i40e_macaddr_add(struct rte_eth_dev *dev, PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u", pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled", pool); - return; + return -ENOTSUP; } if (pool > pf->nb_cfg_vmdq_vsi) { PMD_DRV_LOG(ERR, "Pool number %u invalid. 
Max pool is %u", pool, pf->nb_cfg_vmdq_vsi); - return; + return -EINVAL; } - (void)rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN); + rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN); if (dev->data->dev_conf.rxmode.hw_vlan_filter) mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; else @@ -3261,8 +3456,9 @@ i40e_macaddr_add(struct rte_eth_dev *dev, ret = i40e_vsi_add_mac(vsi, &mac_filter); if (ret != I40E_SUCCESS) { PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter"); - return; + return -ENODEV; } + return 0; } /* Remove a MAC address, and update filters */ @@ -3289,8 +3485,8 @@ i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index) /* No VMDQ pool enabled or configured */ if (!(pf->flags & I40E_FLAG_VMDQ) || (i > pf->nb_cfg_vmdq_vsi)) { - PMD_DRV_LOG(ERR, "No VMDQ pool enabled" - "/configured"); + PMD_DRV_LOG(ERR, + "No VMDQ pool enabled/configured"); return; } vsi = pf->vmdq[i - 1].vsi; @@ -3351,10 +3547,10 @@ i40e_vf_mac_filter_set(struct i40e_pf *pf, } if (add) { - (void)rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN); - (void)rte_memcpy(hw->mac.addr, new_mac->addr_bytes, + rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN); + rte_memcpy(hw->mac.addr, new_mac->addr_bytes, ETHER_ADDR_LEN); - (void)rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr, + rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr, ETHER_ADDR_LEN); mac_filter.filter_type = filter->filter_type; @@ -3365,7 +3561,7 @@ i40e_vf_mac_filter_set(struct i40e_pf *pf, } ether_addr_copy(new_mac, &pf->dev_addr); } else { - (void)rte_memcpy(hw->mac.addr, hw->mac.perm_addr, + rte_memcpy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN); ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr); if (ret != I40E_SUCCESS) { @@ -3491,9 +3687,9 @@ i40e_dev_rss_reta_update(struct rte_eth_dev *dev, if (reta_size != lut_size || reta_size > ETH_RSS_RETA_SIZE_512) { - PMD_DRV_LOG(ERR, "The size of hash lookup table configured " - "(%d) doesn't match the number hardware can supported " - "(%d)\n", reta_size, lut_size); + PMD_DRV_LOG(ERR, + "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)", + reta_size, lut_size); return -EINVAL; } @@ -3532,9 +3728,9 @@ i40e_dev_rss_reta_query(struct rte_eth_dev *dev, if (reta_size != lut_size || reta_size > ETH_RSS_RETA_SIZE_512) { - PMD_DRV_LOG(ERR, "The size of hash lookup table configured " - "(%d) doesn't match the number hardware can supported " - "(%d)\n", reta_size, lut_size); + PMD_DRV_LOG(ERR, + "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)", + reta_size, lut_size); return -EINVAL; } @@ -3589,8 +3785,9 @@ i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw, mem->va = mz->addr; mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr); mem->zone = (const void *)mz; - PMD_DRV_LOG(DEBUG, "memzone %s allocated with physical address: " - "%"PRIu64, mz->name, mem->pa); + PMD_DRV_LOG(DEBUG, + "memzone %s allocated with physical address: %"PRIu64, + mz->name, mem->pa); return I40E_SUCCESS; } @@ -3607,9 +3804,9 @@ i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw, if (!mem) return I40E_ERR_PARAM; - PMD_DRV_LOG(DEBUG, "memzone %s to be freed with physical address: " - "%"PRIu64, ((const struct rte_memzone *)mem->zone)->name, - mem->pa); + PMD_DRV_LOG(DEBUG, + "memzone %s to be freed with physical address: %"PRIu64, + ((const struct rte_memzone *)mem->zone)->name, mem->pa); rte_memzone_free((const struct rte_memzone *)mem->zone); mem->zone = NULL; mem->va = 
NULL; @@ -3720,7 +3917,7 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_hw *hw = I40E_PF_TO_HW(pf); - struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); uint16_t qp_count = 0, vsi_count = 0; if (pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) { @@ -3768,9 +3965,9 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev) pf->flags |= I40E_FLAG_SRIOV; pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF; pf->vf_num = pci_dev->max_vfs; - PMD_DRV_LOG(DEBUG, "%u VF VSIs, %u queues per VF VSI, " - "in total %u queues", pf->vf_num, pf->vf_nb_qps, - pf->vf_nb_qps * pf->vf_num); + PMD_DRV_LOG(DEBUG, + "%u VF VSIs, %u queues per VF VSI, in total %u queues", + pf->vf_num, pf->vf_nb_qps, pf->vf_nb_qps * pf->vf_num); } else { pf->vf_nb_qps = 0; pf->vf_num = 0; @@ -3798,14 +3995,13 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev) if (pf->max_nb_vmdq_vsi) { pf->flags |= I40E_FLAG_VMDQ; pf->vmdq_nb_qps = pf->vmdq_nb_qp_max; - PMD_DRV_LOG(DEBUG, "%u VMDQ VSIs, %u queues " - "per VMDQ VSI, in total %u queues", - pf->max_nb_vmdq_vsi, - pf->vmdq_nb_qps, pf->vmdq_nb_qps * - pf->max_nb_vmdq_vsi); + PMD_DRV_LOG(DEBUG, + "%u VMDQ VSIs, %u queues per VMDQ VSI, in total %u queues", + pf->max_nb_vmdq_vsi, pf->vmdq_nb_qps, + pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi); } else { - PMD_DRV_LOG(INFO, "No enough queues left for " - "VMDq"); + PMD_DRV_LOG(INFO, + "No enough queues left for VMDq"); } } else { PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq"); @@ -3818,15 +4014,15 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev) pf->flags |= I40E_FLAG_DCB; if (qp_count > hw->func_caps.num_tx_qp) { - PMD_DRV_LOG(ERR, "Failed to allocate %u queues, which exceeds " - "the hardware maximum %u", qp_count, - hw->func_caps.num_tx_qp); + PMD_DRV_LOG(ERR, + "Failed to allocate %u queues, which exceeds the hardware maximum %u", + qp_count, hw->func_caps.num_tx_qp); return -EINVAL; } if (vsi_count > hw->func_caps.num_vsis) { - PMD_DRV_LOG(ERR, "Failed to allocate %u VSIs, which exceeds " - "the hardware maximum %u", vsi_count, - hw->func_caps.num_vsis); + PMD_DRV_LOG(ERR, + "Failed to allocate %u VSIs, which exceeds the hardware maximum %u", + vsi_count, hw->func_caps.num_vsis); return -EINVAL; } @@ -4072,8 +4268,8 @@ i40e_res_pool_alloc(struct i40e_res_pool_info *pool, */ entry = rte_zmalloc("res_pool", sizeof(*entry), 0); if (entry == NULL) { - PMD_DRV_LOG(ERR, "Failed to allocate memory for " - "resource pool"); + PMD_DRV_LOG(ERR, + "Failed to allocate memory for resource pool"); return -ENOMEM; } entry->base = valid_entry->base; @@ -4113,9 +4309,9 @@ validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap) } if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) { - PMD_DRV_LOG(ERR, "Enabled TC map 0x%x not applicable to " - "HW support 0x%x", hw->func_caps.enabled_tcmap, - enabled_tcmap); + PMD_DRV_LOG(ERR, + "Enabled TC map 0x%x not applicable to HW support 0x%x", + hw->func_caps.enabled_tcmap, enabled_tcmap); return I40E_NOT_SUPPORTED; } return I40E_SUCCESS; @@ -4157,7 +4353,7 @@ i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi, vsi->info.valid_sections = rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID); memset(&ctxt, 0, sizeof(ctxt)); - (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); + rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); ctxt.seid = vsi->seid; hw = I40E_VSI_TO_HW(vsi); @@ -4196,7 +4392,7 @@ i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, 
uint8_t enabled_tcmap) return ret; } - (void)rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles, + rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles, sizeof(vsi->info.qs_handle)); return I40E_SUCCESS; } @@ -4217,6 +4413,8 @@ i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi, for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) if (enabled_tcmap & (1 << i)) total_tc++; + if (total_tc == 0) + total_tc = 1; vsi->enabled_tc = enabled_tcmap; /* Number of queues per enabled TC */ @@ -4333,6 +4531,7 @@ i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi) hw->aq.asq_last_status); goto fail; } + veb->enabled_tc = I40E_DEFAULT_TCMAP; /* get statistics index */ ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL, @@ -4367,6 +4566,9 @@ i40e_vsi_release(struct i40e_vsi *vsi) if (!vsi) return I40E_SUCCESS; + if (!vsi->adapter) + return -EFAULT; + user_param = vsi->user_param; pf = I40E_VSI_TO_PF(vsi); @@ -4447,7 +4649,7 @@ i40e_update_default_filter_setting(struct i40e_vsi *vsi) if (vsi->type != I40E_VSI_MAIN) return I40E_ERR_CONFIG; memset(&def_filter, 0, sizeof(def_filter)); - (void)rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr, + rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr, ETH_ADDR_LEN); def_filter.vlan_tag = 0; def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH | @@ -4457,8 +4659,8 @@ i40e_update_default_filter_setting(struct i40e_vsi *vsi) struct i40e_mac_filter *f; struct ether_addr *mac; - PMD_DRV_LOG(WARNING, "Cannot remove the default " - "macvlan filter"); + PMD_DRV_LOG(DEBUG, + "Cannot remove the default macvlan filter"); /* It needs to add the permanent mac into mac list */ f = rte_zmalloc("macv_filter", sizeof(*f), 0); if (f == NULL) { @@ -4466,7 +4668,7 @@ i40e_update_default_filter_setting(struct i40e_vsi *vsi) return I40E_ERR_NO_MEMORY; } mac = &f->mac_info.mac_addr; - (void)rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr, + rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr, ETH_ADDR_LEN); f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH; TAILQ_INSERT_TAIL(&vsi->mac_list, f, next); @@ -4474,7 +4676,7 @@ i40e_update_default_filter_setting(struct i40e_vsi *vsi) return ret; } - (void)rte_memcpy(&filter.mac_addr, + rte_memcpy(&filter.mac_addr, (struct ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN); filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; return i40e_vsi_add_mac(vsi, &filter); @@ -4508,8 +4710,9 @@ i40e_vsi_get_bw_config(struct i40e_vsi *vsi) ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &ets_sla_config, NULL); if (ret != I40E_SUCCESS) { - PMD_DRV_LOG(ERR, "VSI failed to get TC bandwdith " - "configuration %u", hw->aq.asq_last_status); + PMD_DRV_LOG(ERR, + "VSI failed to get TC bandwdith configuration %u", + hw->aq.asq_last_status); return ret; } @@ -4576,7 +4779,7 @@ i40e_enable_pf_lb(struct i40e_pf *pf) ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (ret) - PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d\n", + PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d", hw->aq.asq_last_status); } @@ -4597,14 +4800,14 @@ i40e_vsi_setup(struct i40e_pf *pf, if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV && uplink_vsi == NULL) { - PMD_DRV_LOG(ERR, "VSI setup failed, " - "VSI link shouldn't be NULL"); + PMD_DRV_LOG(ERR, + "VSI setup failed, VSI link shouldn't be NULL"); return NULL; } if (type == I40E_VSI_MAIN && uplink_vsi != NULL) { - PMD_DRV_LOG(ERR, "VSI setup failed, MAIN VSI " - "uplink VSI should be NULL"); + PMD_DRV_LOG(ERR, + "VSI setup failed, MAIN VSI uplink VSI should be NULL"); return NULL; } @@ -4648,6 +4851,8 @@ 
i40e_vsi_setup(struct i40e_pf *pf, vsi->max_macaddrs = I40E_NUM_MACADDR_MAX; vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi; vsi->user_param = user_param; + vsi->vlan_anti_spoof_on = 0; + vsi->vlan_filter_on = 0; /* Allocate queues */ switch (vsi->type) { case I40E_VSI_MAIN : @@ -4732,7 +4937,7 @@ i40e_vsi_setup(struct i40e_pf *pf, PMD_DRV_LOG(ERR, "Failed to get VSI params"); goto fail_msix_alloc; } - (void)rte_memcpy(&vsi->info, &ctxt.info, + rte_memcpy(&vsi->info, &ctxt.info, sizeof(struct i40e_aqc_vsi_properties_data)); vsi->vsi_id = ctxt.vsi_number; vsi->info.valid_sections = 0; @@ -4750,13 +4955,13 @@ i40e_vsi_setup(struct i40e_pf *pf, rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID); vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH; - (void)rte_memcpy(&ctxt.info, &vsi->info, + rte_memcpy(&ctxt.info, &vsi->info, sizeof(struct i40e_aqc_vsi_properties_data)); ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info, I40E_DEFAULT_TCMAP); if (ret != I40E_SUCCESS) { - PMD_DRV_LOG(ERR, "Failed to configure " - "TC queue mapping"); + PMD_DRV_LOG(ERR, + "Failed to configure TC queue mapping"); goto fail_msix_alloc; } ctxt.seid = vsi->seid; @@ -4771,15 +4976,15 @@ i40e_vsi_setup(struct i40e_pf *pf, goto fail_msix_alloc; } - (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping, + rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping, sizeof(vsi->info.tc_mapping)); - (void)rte_memcpy(&vsi->info.queue_mapping, + rte_memcpy(&vsi->info.queue_mapping, &ctxt.info.queue_mapping, sizeof(vsi->info.queue_mapping)); vsi->info.mapping_flags = ctxt.info.mapping_flags; vsi->info.valid_sections = 0; - (void)rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr, + rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr, ETH_ADDR_LEN); /** @@ -4824,13 +5029,14 @@ i40e_vsi_setup(struct i40e_pf *pf, rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID); ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL; ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info, - I40E_DEFAULT_TCMAP); + hw->func_caps.enabled_tcmap); if (ret != I40E_SUCCESS) { - PMD_DRV_LOG(ERR, "Failed to configure " - "TC queue mapping"); + PMD_DRV_LOG(ERR, + "Failed to configure TC queue mapping"); goto fail_msix_alloc; } - ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP; + + ctxt.info.up_enable_bits = hw->func_caps.enabled_tcmap; ctxt.info.valid_sections |= rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID); /** @@ -4869,8 +5075,8 @@ i40e_vsi_setup(struct i40e_pf *pf, ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info, I40E_DEFAULT_TCMAP); if (ret != I40E_SUCCESS) { - PMD_DRV_LOG(ERR, "Failed to configure " - "TC queue mapping"); + PMD_DRV_LOG(ERR, + "Failed to configure TC queue mapping"); goto fail_msix_alloc; } ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP; @@ -4887,8 +5093,8 @@ i40e_vsi_setup(struct i40e_pf *pf, ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info, I40E_DEFAULT_TCMAP); if (ret != I40E_SUCCESS) { - PMD_DRV_LOG(ERR, "Failed to configure " - "TC queue mapping."); + PMD_DRV_LOG(ERR, + "Failed to configure TC queue mapping."); goto fail_msix_alloc; } ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP; @@ -4921,7 +5127,7 @@ i40e_vsi_setup(struct i40e_pf *pf, } /* MAC/VLAN configuration */ - (void)rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN); + rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN); filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; ret = i40e_vsi_add_mac(vsi, &filter); @@ -5033,7 +5239,7 @@ i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on) 
vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK); vsi->info.port_vlan_flags |= vlan_flags; ctxt.seid = vsi->seid; - (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); + rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (ret) PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping", @@ -5141,6 +5347,8 @@ i40e_pf_setup(struct i40e_pf *pf) pf->offset_loaded = FALSE; memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats)); memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats)); + memset(&pf->internal_stats, 0, sizeof(struct i40e_eth_stats)); + memset(&pf->internal_stats_offset, 0, sizeof(struct i40e_eth_stats)); ret = i40e_pf_get_switch_config(pf); if (ret != I40E_SUCCESS) { @@ -5151,8 +5359,9 @@ i40e_pf_setup(struct i40e_pf *pf) /* make queue allocated first, let FDIR use queue pair 0*/ ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR); if (ret != I40E_FDIR_QUEUE_ID) { - PMD_DRV_LOG(ERR, "queue allocation fails for FDIR :" - " ret =%d", ret); + PMD_DRV_LOG(ERR, + "queue allocation fails for FDIR: ret =%d", + ret); pf->flags &= ~I40E_FLAG_FDIR; } } @@ -5171,12 +5380,12 @@ i40e_pf_setup(struct i40e_pf *pf) else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512) settings.hash_lut_size = I40E_HASH_LUT_SIZE_512; else { - PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported\n", - hw->func_caps.rss_table_size); + PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported", + hw->func_caps.rss_table_size); return I40E_ERR_PARAM; } - PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table " - "size: %u\n", hw->func_caps.rss_table_size); + PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table size: %u", + hw->func_caps.rss_table_size); pf->hash_lut_size = hw->func_caps.rss_table_size; /* Enable ethtype and macvlan filters */ @@ -5426,8 +5635,8 @@ i40e_dev_rx_init(struct i40e_pf *pf) ret = i40e_rx_queue_init(rxq); if (ret != I40E_SUCCESS) { - PMD_DRV_LOG(ERR, "Failed to do RX queue " - "initialization"); + PMD_DRV_LOG(ERR, + "Failed to do RX queue initialization"); break; } } @@ -5654,16 +5863,16 @@ i40e_dev_handle_vfr_event(struct rte_eth_dev *dev) index = abs_vf_id / I40E_UINT32_BIT_SIZE; offset = abs_vf_id % I40E_UINT32_BIT_SIZE; val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index)); - /* VFR event occured */ + /* VFR event occurred */ if (val & (0x1 << offset)) { int ret; /* Clear the event first */ I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index), (0x1 << offset)); - PMD_DRV_LOG(INFO, "VF %u reset occured", abs_vf_id); + PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id); /** - * Only notify a VF reset event occured, + * Only notify a VF reset event occurred, * don't trigger another SW reset */ ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0); @@ -5677,18 +5886,10 @@ static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); - struct i40e_virtchnl_pf_event event; int i; - event.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; - event.event_data.link_event.link_status = - dev->data->dev_link.link_status; - event.event_data.link_event.link_speed = - (enum i40e_aq_link_speed)dev->data->dev_link.link_speed; - for (i = 0; i < pf->vf_num; i++) - i40e_pf_host_send_msg_to_vf(&pf->vfs[i], I40E_VIRTCHNL_OP_EVENT, - I40E_SUCCESS, (uint8_t *)&event, sizeof(event)); + i40e_notify_vf_link_status(dev, &pf->vfs[i]); } static void @@ -5711,8 +5912,9 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev) ret = 
i40e_clean_arq_element(hw, &info, &pending); if (ret != I40E_SUCCESS) { - PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ, " - "aq_err: %u", hw->aq.asq_last_status); + PMD_DRV_LOG(INFO, + "Failed to read msg from AdminQ, aq_err: %u", + hw->aq.asq_last_status); break; } opcode = rte_le_to_cpu_16(info.desc.opcode); @@ -5729,14 +5931,12 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev) break; case i40e_aqc_opc_get_link_status: ret = i40e_dev_link_update(dev, 0); - if (!ret) { - i40e_notify_all_vfs_link_status(dev); + if (!ret) _rte_eth_dev_callback_process(dev, - RTE_ETH_EVENT_INTR_LSC, NULL); - } + RTE_ETH_EVENT_INTR_LSC, NULL, NULL); break; default: - PMD_DRV_LOG(ERR, "Request %u is not supported yet", + PMD_DRV_LOG(DEBUG, "Request %u is not supported yet", opcode); break; } @@ -5757,8 +5957,7 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev) * void */ static void -i40e_dev_interrupt_handler(struct rte_intr_handle *intr_handle, - void *param) +i40e_dev_interrupt_handler(void *param) { struct rte_eth_dev *dev = (struct rte_eth_dev *)param; struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -5775,7 +5974,6 @@ i40e_dev_interrupt_handler(struct rte_intr_handle *intr_handle, PMD_DRV_LOG(INFO, "No interrupt event"); goto done; } -#ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK) PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error"); if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) @@ -5790,7 +5988,6 @@ i40e_dev_interrupt_handler(struct rte_intr_handle *intr_handle, PMD_DRV_LOG(ERR, "ICR0: HMC error"); if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK) PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error"); -#endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */ if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) { PMD_DRV_LOG(INFO, "ICR0: VF reset detected"); @@ -5804,10 +6001,10 @@ i40e_dev_interrupt_handler(struct rte_intr_handle *intr_handle, done: /* Enable interrupt */ i40e_pf_enable_irq0(hw); - rte_intr_enable(intr_handle); + rte_intr_enable(dev->intr_handle); } -static int +int i40e_add_macvlan_filters(struct i40e_vsi *vsi, struct i40e_macvlan_filter *filter, int total) @@ -5836,7 +6033,7 @@ i40e_add_macvlan_filters(struct i40e_vsi *vsi, memset(req_list, 0, ele_buff_size); for (i = 0; i < actual_num; i++) { - (void)rte_memcpy(req_list[i].mac_addr, + rte_memcpy(req_list[i].mac_addr, &filter[num + i].macaddr, ETH_ADDR_LEN); req_list[i].vlan_tag = rte_cpu_to_le_16(filter[num + i].vlan_id); @@ -5857,7 +6054,7 @@ i40e_add_macvlan_filters(struct i40e_vsi *vsi, flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH; break; default: - PMD_DRV_LOG(ERR, "Invalid MAC match type\n"); + PMD_DRV_LOG(ERR, "Invalid MAC match type"); ret = I40E_ERR_PARAM; goto DONE; } @@ -5881,7 +6078,7 @@ DONE: return ret; } -static int +int i40e_remove_macvlan_filters(struct i40e_vsi *vsi, struct i40e_macvlan_filter *filter, int total) @@ -5911,7 +6108,7 @@ i40e_remove_macvlan_filters(struct i40e_vsi *vsi, memset(req_list, 0, ele_buff_size); for (i = 0; i < actual_num; i++) { - (void)rte_memcpy(req_list[i].mac_addr, + rte_memcpy(req_list[i].mac_addr, &filter[num + i].macaddr, ETH_ADDR_LEN); req_list[i].vlan_tag = rte_cpu_to_le_16(filter[num + i].vlan_id); @@ -5932,7 +6129,7 @@ i40e_remove_macvlan_filters(struct i40e_vsi *vsi, flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH; break; default: - PMD_DRV_LOG(ERR, "Invalid MAC filter type\n"); + PMD_DRV_LOG(ERR, "Invalid MAC filter type"); ret = I40E_ERR_PARAM; goto DONE; } @@ -5987,14 +6184,11 @@ i40e_find_vlan_filter(struct i40e_vsi *vsi, } static void -i40e_set_vlan_filter(struct 
i40e_vsi *vsi, - uint16_t vlan_id, bool on) +i40e_store_vlan_filter(struct i40e_vsi *vsi, + uint16_t vlan_id, bool on) { uint32_t vid_idx, vid_bit; - if (vlan_id > ETH_VLAN_ID_MAX) - return; - vid_idx = I40E_VFTA_IDX(vlan_id); vid_bit = I40E_VFTA_BIT(vlan_id); @@ -6004,11 +6198,43 @@ i40e_set_vlan_filter(struct i40e_vsi *vsi, vsi->vfta[vid_idx] &= ~vid_bit; } +void +i40e_set_vlan_filter(struct i40e_vsi *vsi, + uint16_t vlan_id, bool on) +{ + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0}; + int ret; + + if (vlan_id > ETH_VLAN_ID_MAX) + return; + + i40e_store_vlan_filter(vsi, vlan_id, on); + + if ((!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on) || !vlan_id) + return; + + vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id); + + if (on) { + ret = i40e_aq_add_vlan(hw, vsi->seid, + &vlan_data, 1, NULL); + if (ret != I40E_SUCCESS) + PMD_DRV_LOG(ERR, "Failed to add vlan filter"); + } else { + ret = i40e_aq_remove_vlan(hw, vsi->seid, + &vlan_data, 1, NULL); + if (ret != I40E_SUCCESS) + PMD_DRV_LOG(ERR, + "Failed to remove vlan filter"); + } +} + /** * Find all vlan options for specific mac addr, * return with actual vlan found. */ -static inline int +int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi, struct i40e_macvlan_filter *mv_f, int num, struct ether_addr *addr) @@ -6029,11 +6255,11 @@ i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi, for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) { if (vsi->vfta[j] & (1 << k)) { if (i > num - 1) { - PMD_DRV_LOG(ERR, "vlan number " - "not match"); + PMD_DRV_LOG(ERR, + "vlan number doesn't match"); return I40E_ERR_PARAM; } - (void)rte_memcpy(&mv_f[i].macaddr, + rte_memcpy(&mv_f[i].macaddr, addr, ETH_ADDR_LEN); mv_f[i].vlan_id = j * I40E_UINT32_BIT_SIZE + k; @@ -6062,7 +6288,7 @@ i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi, PMD_DRV_LOG(ERR, "buffer number not match"); return I40E_ERR_PARAM; } - (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr, + rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr, ETH_ADDR_LEN); mv_f[i].vlan_id = vlan; mv_f[i].filter_type = f->mac_info.filter_type; @@ -6075,7 +6301,7 @@ i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi, static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi) { - int i, num; + int i, j, num; struct i40e_mac_filter *f; struct i40e_macvlan_filter *mv_f; int ret = I40E_SUCCESS; @@ -6098,8 +6324,9 @@ i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi) i = 0; if (vsi->vlan_num == 0) { TAILQ_FOREACH(f, &vsi->mac_list, next) { - (void)rte_memcpy(&mv_f[i].macaddr, + rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr, ETH_ADDR_LEN); + mv_f[i].filter_type = f->mac_info.filter_type; mv_f[i].vlan_id = 0; i++; } @@ -6109,6 +6336,8 @@ i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi) vsi->vlan_num, &f->mac_info.mac_addr); if (ret != I40E_SUCCESS) goto DONE; + for (j = i; j < i + vsi->vlan_num; j++) + mv_f[j].filter_type = f->mac_info.filter_type; i += vsi->vlan_num; } } @@ -6265,7 +6494,7 @@ i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter) for (i = 0; i < vlan_num; i++) { mv_f[i].filter_type = mac_filter->filter_type; - (void)rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr, + rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr, ETH_ADDR_LEN); } @@ -6288,7 +6517,7 @@ i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter) ret = I40E_ERR_NO_MEMORY; goto DONE; } - (void)rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr, + rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr, 
ETH_ADDR_LEN); f->mac_info.filter_type = mac_filter->filter_type; TAILQ_INSERT_TAIL(&vsi->mac_list, f, next); @@ -6320,7 +6549,7 @@ i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr) if (filter_type == RTE_MACVLAN_PERFECT_MATCH || filter_type == RTE_MACVLAN_HASH_MATCH) { if (vlan_num == 0) { - PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0\n"); + PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0"); return I40E_ERR_PARAM; } } else if (filter_type == RTE_MAC_PERFECT_MATCH || @@ -6335,7 +6564,7 @@ i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr) for (i = 0; i < vlan_num; i++) { mv_f[i].filter_type = filter_type; - (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr, + rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr, ETH_ADDR_LEN); } if (filter_type == RTE_MACVLAN_PERFECT_MATCH || @@ -6503,8 +6732,7 @@ i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len) ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw); if (ret) - PMD_INIT_LOG(ERR, "Failed to configure RSS key " - "via AQ"); + PMD_INIT_LOG(ERR, "Failed to configure RSS key via AQ"); } else { uint32_t *hash_key = (uint32_t *)key; uint16_t i; @@ -6652,18 +6880,27 @@ i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag) /* Convert tunnel filter structure */ static int -i40e_tunnel_filter_convert(struct i40e_aqc_add_remove_cloud_filters_element_data - *cld_filter, - struct i40e_tunnel_filter *tunnel_filter) +i40e_tunnel_filter_convert( + struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter, + struct i40e_tunnel_filter *tunnel_filter) { - ether_addr_copy((struct ether_addr *)&cld_filter->outer_mac, + ether_addr_copy((struct ether_addr *)&cld_filter->element.outer_mac, (struct ether_addr *)&tunnel_filter->input.outer_mac); - ether_addr_copy((struct ether_addr *)&cld_filter->inner_mac, + ether_addr_copy((struct ether_addr *)&cld_filter->element.inner_mac, (struct ether_addr *)&tunnel_filter->input.inner_mac); - tunnel_filter->input.inner_vlan = cld_filter->inner_vlan; - tunnel_filter->input.flags = cld_filter->flags; - tunnel_filter->input.tenant_id = cld_filter->tenant_id; - tunnel_filter->queue = cld_filter->queue_number; + tunnel_filter->input.inner_vlan = cld_filter->element.inner_vlan; + if ((rte_le_to_cpu_16(cld_filter->element.flags) & + I40E_AQC_ADD_CLOUD_FLAGS_IPV6) == + I40E_AQC_ADD_CLOUD_FLAGS_IPV6) + tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV6; + else + tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV4; + tunnel_filter->input.flags = cld_filter->element.flags; + tunnel_filter->input.tenant_id = cld_filter->element.tenant_id; + tunnel_filter->queue = cld_filter->element.queue_number; + rte_memcpy(tunnel_filter->input.general_fields, + cld_filter->general_fields, + sizeof(cld_filter->general_fields)); return 0; } @@ -6742,40 +6979,44 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf, int val, ret = 0; struct i40e_hw *hw = I40E_PF_TO_HW(pf); struct i40e_vsi *vsi = pf->main_vsi; - struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter; - struct i40e_aqc_add_remove_cloud_filters_element_data *pfilter; + struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter; + struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter; struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel; struct i40e_tunnel_filter *tunnel, *node; struct i40e_tunnel_filter check_filter; /* Check if filter exists */ cld_filter = rte_zmalloc("tunnel_filter", - sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data), - 0); + sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext), + 0); if (NULL == 
cld_filter) { PMD_DRV_LOG(ERR, "Failed to alloc memory."); - return -EINVAL; + return -ENOMEM; } pfilter = cld_filter; - ether_addr_copy(&tunnel_filter->outer_mac, (struct ether_addr*)&pfilter->outer_mac); - ether_addr_copy(&tunnel_filter->inner_mac, (struct ether_addr*)&pfilter->inner_mac); + ether_addr_copy(&tunnel_filter->outer_mac, + (struct ether_addr *)&pfilter->element.outer_mac); + ether_addr_copy(&tunnel_filter->inner_mac, + (struct ether_addr *)&pfilter->element.inner_mac); - pfilter->inner_vlan = rte_cpu_to_le_16(tunnel_filter->inner_vlan); + pfilter->element.inner_vlan = + rte_cpu_to_le_16(tunnel_filter->inner_vlan); if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) { ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4; ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr); - rte_memcpy(&pfilter->ipaddr.v4.data, + rte_memcpy(&pfilter->element.ipaddr.v4.data, &rte_cpu_to_le_32(ipv4_addr), - sizeof(pfilter->ipaddr.v4.data)); + sizeof(pfilter->element.ipaddr.v4.data)); } else { ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6; for (i = 0; i < 4; i++) { convert_ipv6[i] = rte_cpu_to_le_32(rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv6_addr[i])); } - rte_memcpy(&pfilter->ipaddr.v6.data, &convert_ipv6, - sizeof(pfilter->ipaddr.v6.data)); + rte_memcpy(&pfilter->element.ipaddr.v6.data, + &convert_ipv6, + sizeof(pfilter->element.ipaddr.v6.data)); } /* check tunneled type */ @@ -6797,17 +7038,18 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf, } val = i40e_dev_get_filter_type(tunnel_filter->filter_type, - &pfilter->flags); + &pfilter->element.flags); if (val < 0) { rte_free(cld_filter); return -EINVAL; } - pfilter->flags |= rte_cpu_to_le_16( + pfilter->element.flags |= rte_cpu_to_le_16( I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE | ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT)); - pfilter->tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id); - pfilter->queue_number = rte_cpu_to_le_16(tunnel_filter->queue_id); + pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id); + pfilter->element.queue_number = + rte_cpu_to_le_16(tunnel_filter->queue_id); /* Check if there is the filter in SW list */ memset(&check_filter, 0, sizeof(check_filter)); @@ -6824,20 +7066,338 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf, } if (add) { - ret = i40e_aq_add_cloud_filters(hw, vsi->seid, cld_filter, 1); + ret = i40e_aq_add_cloud_filters(hw, + vsi->seid, &cld_filter->element, 1); if (ret < 0) { PMD_DRV_LOG(ERR, "Failed to add a tunnel filter."); - return ret; + return -ENOTSUP; } tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0); rte_memcpy(tunnel, &check_filter, sizeof(check_filter)); ret = i40e_sw_tunnel_filter_insert(pf, tunnel); } else { ret = i40e_aq_remove_cloud_filters(hw, vsi->seid, - cld_filter, 1); + &cld_filter->element, 1); if (ret < 0) { PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter."); - return ret; + return -ENOTSUP; + } + ret = i40e_sw_tunnel_filter_del(pf, &node->input); + } + + rte_free(cld_filter); + return ret; +} + +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0 0x48 +#define I40E_TR_VXLAN_GRE_KEY_MASK 0x4 +#define I40E_TR_GENEVE_KEY_MASK 0x8 +#define I40E_TR_GENERIC_UDP_TUNNEL_MASK 0x40 +#define I40E_TR_GRE_KEY_MASK 0x400 +#define I40E_TR_GRE_KEY_WITH_XSUM_MASK 0x800 +#define I40E_TR_GRE_NO_KEY_MASK 0x8000 + +static enum +i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf) +{ + struct i40e_aqc_replace_cloud_filters_cmd filter_replace; + struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf; + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + 
enum i40e_status_code status = I40E_SUCCESS; + + memset(&filter_replace, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); + memset(&filter_replace_buf, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf)); + + /* create L1 filter */ + filter_replace.old_filter_type = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC; + filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_TEID_MPLS; + filter_replace.tr_bit = 0; + + /* Prepare the buffer, 3 entries */ + filter_replace_buf.data[0] = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0; + filter_replace_buf.data[0] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[2] = 0xFF; + filter_replace_buf.data[3] = 0xFF; + filter_replace_buf.data[4] = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1; + filter_replace_buf.data[4] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[7] = 0xF0; + filter_replace_buf.data[8] + = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0; + filter_replace_buf.data[8] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[10] = I40E_TR_VXLAN_GRE_KEY_MASK | + I40E_TR_GENEVE_KEY_MASK | + I40E_TR_GENERIC_UDP_TUNNEL_MASK; + filter_replace_buf.data[11] = (I40E_TR_GRE_KEY_MASK | + I40E_TR_GRE_KEY_WITH_XSUM_MASK | + I40E_TR_GRE_NO_KEY_MASK) >> 8; + + status = i40e_aq_replace_cloud_filters(hw, &filter_replace, + &filter_replace_buf); + return status; +} + +static enum +i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf) +{ + struct i40e_aqc_replace_cloud_filters_cmd filter_replace; + struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf; + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + enum i40e_status_code status = I40E_SUCCESS; + + /* For MPLSoUDP */ + memset(&filter_replace, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); + memset(&filter_replace_buf, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf)); + filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER | + I40E_AQC_MIRROR_CLOUD_FILTER; + filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP; + filter_replace.new_filter_type = + I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP; + /* Prepare the buffer, 2 entries */ + filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG; + filter_replace_buf.data[0] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_TEID_MPLS; + filter_replace_buf.data[4] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + status = i40e_aq_replace_cloud_filters(hw, &filter_replace, + &filter_replace_buf); + if (status < 0) + return status; + + /* For MPLSoGRE */ + memset(&filter_replace, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); + memset(&filter_replace_buf, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf)); + + filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER | + I40E_AQC_MIRROR_CLOUD_FILTER; + filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC; + filter_replace.new_filter_type = + I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE; + /* Prepare the buffer, 2 entries */ + filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG; + filter_replace_buf.data[0] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_TEID_MPLS; + filter_replace_buf.data[4] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + + status = i40e_aq_replace_cloud_filters(hw, &filter_replace, + &filter_replace_buf); + return status; +} + +int +i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf, + struct 
i40e_tunnel_filter_conf *tunnel_filter, + uint8_t add) +{ + uint16_t ip_type; + uint32_t ipv4_addr; + uint8_t i, tun_type = 0; + /* internal variable to convert ipv6 byte order */ + uint32_t convert_ipv6[4]; + int val, ret = 0; + struct i40e_pf_vf *vf = NULL; + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct i40e_vsi *vsi; + struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter; + struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter; + struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel; + struct i40e_tunnel_filter *tunnel, *node; + struct i40e_tunnel_filter check_filter; /* Check if filter exists */ + uint32_t teid_le; + bool big_buffer = 0; + + cld_filter = rte_zmalloc("tunnel_filter", + sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext), + 0); + + if (cld_filter == NULL) { + PMD_DRV_LOG(ERR, "Failed to alloc memory."); + return -ENOMEM; + } + pfilter = cld_filter; + + ether_addr_copy(&tunnel_filter->outer_mac, + (struct ether_addr *)&pfilter->element.outer_mac); + ether_addr_copy(&tunnel_filter->inner_mac, + (struct ether_addr *)&pfilter->element.inner_mac); + + pfilter->element.inner_vlan = + rte_cpu_to_le_16(tunnel_filter->inner_vlan); + if (tunnel_filter->ip_type == I40E_TUNNEL_IPTYPE_IPV4) { + ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4; + ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr); + rte_memcpy(&pfilter->element.ipaddr.v4.data, + &rte_cpu_to_le_32(ipv4_addr), + sizeof(pfilter->element.ipaddr.v4.data)); + } else { + ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6; + for (i = 0; i < 4; i++) { + convert_ipv6[i] = + rte_cpu_to_le_32(rte_be_to_cpu_32( + tunnel_filter->ip_addr.ipv6_addr[i])); + } + rte_memcpy(&pfilter->element.ipaddr.v6.data, + &convert_ipv6, + sizeof(pfilter->element.ipaddr.v6.data)); + } + + /* check tunneled type */ + switch (tunnel_filter->tunnel_type) { + case I40E_TUNNEL_TYPE_VXLAN: + tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN; + break; + case I40E_TUNNEL_TYPE_NVGRE: + tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC; + break; + case I40E_TUNNEL_TYPE_IP_IN_GRE: + tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP; + break; + case I40E_TUNNEL_TYPE_MPLSoUDP: + if (!pf->mpls_replace_flag) { + i40e_replace_mpls_l1_filter(pf); + i40e_replace_mpls_cloud_filter(pf); + pf->mpls_replace_flag = 1; + } + teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id); + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] = + teid_le >> 4; + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] = + (teid_le & 0xF) << 12; + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] = + 0x40; + big_buffer = 1; + tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoUDP; + break; + case I40E_TUNNEL_TYPE_MPLSoGRE: + if (!pf->mpls_replace_flag) { + i40e_replace_mpls_l1_filter(pf); + i40e_replace_mpls_cloud_filter(pf); + pf->mpls_replace_flag = 1; + } + teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id); + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] = + teid_le >> 4; + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] = + (teid_le & 0xF) << 12; + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] = + 0x0; + big_buffer = 1; + tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoGRE; + break; + case I40E_TUNNEL_TYPE_QINQ: + if (!pf->qinq_replace_flag) { + ret = i40e_cloud_filter_qinq_create(pf); + if (ret < 0) + PMD_DRV_LOG(DEBUG, + "QinQ tunnel filter already created."); + pf->qinq_replace_flag = 1; + } + /* Add in the General fields the values of + * the Outer and Inner VLAN + * Big Buffer should be set, see changes in + * 
i40e_aq_add_cloud_filters + */ + pfilter->general_fields[0] = tunnel_filter->inner_vlan; + pfilter->general_fields[1] = tunnel_filter->outer_vlan; + big_buffer = 1; + break; + default: + /* Other tunnel types is not supported. */ + PMD_DRV_LOG(ERR, "tunnel type is not supported."); + rte_free(cld_filter); + return -EINVAL; + } + + if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP) + pfilter->element.flags = + I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP; + else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE) + pfilter->element.flags = + I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE; + else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ) + pfilter->element.flags |= + I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ; + else { + val = i40e_dev_get_filter_type(tunnel_filter->filter_type, + &pfilter->element.flags); + if (val < 0) { + rte_free(cld_filter); + return -EINVAL; + } + } + + pfilter->element.flags |= rte_cpu_to_le_16( + I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE | + ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT)); + pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id); + pfilter->element.queue_number = + rte_cpu_to_le_16(tunnel_filter->queue_id); + + if (!tunnel_filter->is_to_vf) + vsi = pf->main_vsi; + else { + if (tunnel_filter->vf_id >= pf->vf_num) { + PMD_DRV_LOG(ERR, "Invalid argument."); + return -EINVAL; + } + vf = &pf->vfs[tunnel_filter->vf_id]; + vsi = vf->vsi; + } + + /* Check if there is the filter in SW list */ + memset(&check_filter, 0, sizeof(check_filter)); + i40e_tunnel_filter_convert(cld_filter, &check_filter); + check_filter.is_to_vf = tunnel_filter->is_to_vf; + check_filter.vf_id = tunnel_filter->vf_id; + node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input); + if (add && node) { + PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!"); + return -EINVAL; + } + + if (!add && !node) { + PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!"); + return -EINVAL; + } + + if (add) { + if (big_buffer) + ret = i40e_aq_add_cloud_filters_big_buffer(hw, + vsi->seid, cld_filter, 1); + else + ret = i40e_aq_add_cloud_filters(hw, + vsi->seid, &cld_filter->element, 1); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to add a tunnel filter."); + return -ENOTSUP; + } + tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0); + rte_memcpy(tunnel, &check_filter, sizeof(check_filter)); + ret = i40e_sw_tunnel_filter_insert(pf, tunnel); + } else { + if (big_buffer) + ret = i40e_aq_remove_cloud_filters_big_buffer( + hw, vsi->seid, cld_filter, 1); + else + ret = i40e_aq_remove_cloud_filters(hw, vsi->seid, + &cld_filter->element, 1); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter."); + return -ENOTSUP; } ret = i40e_sw_tunnel_filter_del(pf, &node->input); } @@ -6877,8 +7437,9 @@ i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port) /* Now check if there is space to add the new port */ idx = i40e_get_vxlan_port_idx(pf, 0); if (idx < 0) { - PMD_DRV_LOG(ERR, "Maximum number of UDP ports reached," - "not adding port %d", port); + PMD_DRV_LOG(ERR, + "Maximum number of UDP ports reached, not adding port %d", + port); return -ENOSPC; } @@ -7028,7 +7589,7 @@ i40e_pf_config_rss(struct i40e_pf *pf) /* * If both VMDQ and RSS enabled, not all of PF queues are configured. - * It's necessary to calulate the actual PF queues that are configured. + * It's necessary to calculate the actual PF queues that are configured. 
*/ if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) num = i40e_pf_calc_configured_queues_num(pf); @@ -7117,7 +7678,7 @@ i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len) int ret = -EINVAL; val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)); - PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x\n", val); + PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val); if (len == 3) { reg = val | I40E_GL_PRS_FVBM_MSK_ENA; @@ -7136,7 +7697,7 @@ i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len) } else { ret = 0; } - PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x\n", + PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x", I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2))); return ret; @@ -7249,15 +7810,15 @@ i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable) if (enable > 0) { if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) { - PMD_DRV_LOG(INFO, "Symmetric hash has already " - "been enabled"); + PMD_DRV_LOG(INFO, + "Symmetric hash has already been enabled"); return; } reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK; } else { if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) { - PMD_DRV_LOG(INFO, "Symmetric hash has already " - "been disabled"); + PMD_DRV_LOG(INFO, + "Symmetric hash has already been disabled"); return; } reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK; @@ -7374,23 +7935,60 @@ i40e_set_hash_filter_global_config(struct i40e_hw *hw, pctype = i40e_flowtype_to_pctype(i); reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ? I40E_GLQF_HSYM_SYMH_ENA_MASK : 0; - i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype), reg); + if (hw->mac.type == I40E_MAC_X722) { + if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP) { + i40e_write_rx_ctl(hw, I40E_GLQF_HSYM( + I40E_FILTER_PCTYPE_NONF_IPV4_UDP), reg); + i40e_write_rx_ctl(hw, I40E_GLQF_HSYM( + I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP), + reg); + i40e_write_rx_ctl(hw, I40E_GLQF_HSYM( + I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP), + reg); + } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) { + i40e_write_rx_ctl(hw, I40E_GLQF_HSYM( + I40E_FILTER_PCTYPE_NONF_IPV4_TCP), reg); + i40e_write_rx_ctl(hw, I40E_GLQF_HSYM( + I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK), + reg); + } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP) { + i40e_write_rx_ctl(hw, I40E_GLQF_HSYM( + I40E_FILTER_PCTYPE_NONF_IPV6_UDP), reg); + i40e_write_rx_ctl(hw, I40E_GLQF_HSYM( + I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP), + reg); + i40e_write_rx_ctl(hw, I40E_GLQF_HSYM( + I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP), + reg); + } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) { + i40e_write_rx_ctl(hw, I40E_GLQF_HSYM( + I40E_FILTER_PCTYPE_NONF_IPV6_TCP), reg); + i40e_write_rx_ctl(hw, I40E_GLQF_HSYM( + I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK), + reg); + } else { + i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype), + reg); + } + } else { + i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype), reg); + } } reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL); if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) { /* Toeplitz */ if (reg & I40E_GLQF_CTL_HTOEP_MASK) { - PMD_DRV_LOG(DEBUG, "Hash function already set to " - "Toeplitz"); + PMD_DRV_LOG(DEBUG, + "Hash function already set to Toeplitz"); goto out; } reg |= I40E_GLQF_CTL_HTOEP_MASK; } else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) { /* Simple XOR */ if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) { - PMD_DRV_LOG(DEBUG, "Hash function already set to " - "Simple XOR"); + PMD_DRV_LOG(DEBUG, + "Hash function already set to Simple XOR"); goto out; } reg &= ~I40E_GLQF_CTL_HTOEP_MASK; @@ -7667,7 +8265,7 @@ 
i40e_get_valid_input_set(enum i40e_filter_pctype pctype, /** * Validate if the input set is allowed for a specific PCTYPE */ -static int +int i40e_validate_input_set(enum i40e_filter_pctype pctype, enum rte_filter_type filter, uint64_t inset) { @@ -7842,7 +8440,7 @@ i40e_parse_input_set(uint64_t *inset, * Translate the input set from bit masks to register aware bit masks * and vice versa */ -static uint64_t +uint64_t i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input) { uint64_t val = 0; @@ -7927,7 +8525,7 @@ i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input) return val; } -static int +int i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem) { uint8_t i, idx = 0; @@ -7975,15 +8573,15 @@ i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem) return idx; } -static void +void i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val) { uint32_t reg = i40e_read_rx_ctl(hw, addr); - PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x\n", addr, reg); + PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg); if (reg != val) i40e_write_rx_ctl(hw, addr, val); - PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x\n", addr, + PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr, (uint32_t)i40e_read_rx_ctl(hw, addr)); } @@ -8393,13 +8991,14 @@ i40e_ethertype_filter_set(struct i40e_pf *pf, } if (filter->ether_type == ETHER_TYPE_IPv4 || filter->ether_type == ETHER_TYPE_IPv6) { - PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in" - " control packet filter.", filter->ether_type); + PMD_DRV_LOG(ERR, + "unsupported ether_type(0x%04x) in control packet filter.", + filter->ether_type); return -EINVAL; } if (filter->ether_type == ETHER_TYPE_VLAN) - PMD_DRV_LOG(WARNING, "filter vlan ether_type in first tag is" - " not supported."); + PMD_DRV_LOG(WARNING, + "filter vlan ether_type in first tag is not supported."); /* Check if there is the filter in SW list */ memset(&check_filter, 0, sizeof(check_filter)); @@ -8429,11 +9028,10 @@ i40e_ethertype_filter_set(struct i40e_pf *pf, pf->main_vsi->seid, filter->queue, add, &stats, NULL); - PMD_DRV_LOG(INFO, "add/rem control packet filter, return %d," - " mac_etype_used = %u, etype_used = %u," - " mac_etype_free = %u, etype_free = %u\n", - ret, stats.mac_etype_used, stats.etype_used, - stats.mac_etype_free, stats.etype_free); + PMD_DRV_LOG(INFO, + "add/rem control packet filter, return %d, mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, etype_free = %u", + ret, stats.mac_etype_used, stats.etype_used, + stats.mac_etype_free, stats.etype_free); if (ret < 0) return -ENOSYS; @@ -8483,7 +9081,7 @@ i40e_ethertype_filter_handle(struct rte_eth_dev *dev, FALSE); break; default: - PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op); + PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op); ret = -ENOSYS; break; } @@ -8543,11 +9141,11 @@ i40e_dev_filter_ctrl(struct rte_eth_dev *dev, static void i40e_enable_extended_tag(struct rte_eth_dev *dev) { - struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); uint32_t buf = 0; int ret; - ret = rte_eal_pci_read_config(pci_dev, &buf, sizeof(buf), + ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf), PCI_DEV_CAP_REG); if (ret < 0) { PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x", @@ -8560,7 +9158,7 @@ i40e_enable_extended_tag(struct rte_eth_dev *dev) } buf = 0; - ret = rte_eal_pci_read_config(pci_dev, &buf, sizeof(buf), + ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf), 
PCI_DEV_CTRL_REG); if (ret < 0) { PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x", @@ -8572,7 +9170,7 @@ i40e_enable_extended_tag(struct rte_eth_dev *dev) return; } buf |= PCI_DEV_CTRL_EXT_TAG_MASK; - ret = rte_eal_pci_write_config(pci_dev, &buf, sizeof(buf), + ret = rte_pci_write_config(pci_dev, &buf, sizeof(buf), PCI_DEV_CTRL_REG); if (ret < 0) { PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x", @@ -8681,12 +9279,17 @@ i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype) */ /* For both X710 and XL710 */ -#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x10000200 -#define I40E_GL_SWR_PRI_JOIN_MAP_0 0x26CE00 +#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1 0x10000200 +#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2 0x20000200 +#define I40E_GL_SWR_PRI_JOIN_MAP_0 0x26CE00 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200 #define I40E_GL_SWR_PRI_JOIN_MAP_2 0x26CE08 +/* For X722 */ +#define I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x20000200 +#define I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x013F0200 + /* For X710 */ #define I40E_GL_SWR_PM_UP_THR_EF_VALUE 0x03030303 /* For XL710 */ @@ -8699,17 +9302,25 @@ i40e_dev_sync_phy_type(struct i40e_hw *hw) enum i40e_status_code status; struct i40e_aq_get_phy_abilities_resp phy_ab; int ret = -ENOTSUP; + int retries = 0; status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab, NULL); - if (status) - return ret; - + while (status) { + PMD_INIT_LOG(WARNING, "Failed to sync phy type: status=%d", + status); + retries++; + rte_delay_us(100000); + if (retries < 5) + status = i40e_aq_get_phy_capabilities(hw, false, + true, &phy_ab, NULL); + else + return ret; + } return 0; } - static void i40e_configure_registers(struct i40e_hw *hw) { @@ -8717,8 +9328,8 @@ i40e_configure_registers(struct i40e_hw *hw) uint32_t addr; uint64_t val; } reg_table[] = { - {I40E_GL_SWR_PRI_JOIN_MAP_0, I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE}, - {I40E_GL_SWR_PRI_JOIN_MAP_2, I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE}, + {I40E_GL_SWR_PRI_JOIN_MAP_0, 0}, + {I40E_GL_SWR_PRI_JOIN_MAP_2, 0}, {I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */ }; uint64_t reg; @@ -8726,6 +9337,28 @@ i40e_configure_registers(struct i40e_hw *hw) int ret; for (i = 0; i < RTE_DIM(reg_table); i++) { + if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_0) { + if (hw->mac.type == I40E_MAC_X722) /* For X722 */ + reg_table[i].val = + I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE; + else /* For X710/XL710/XXV710 */ + if (hw->aq.fw_maj_ver < 6) + reg_table[i].val = + I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1; + else + reg_table[i].val = + I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2; + } + + if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) { + if (hw->mac.type == I40E_MAC_X722) /* For X722 */ + reg_table[i].val = + I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE; + else /* For X710/XL710/XXV710 */ + reg_table[i].val = + I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE; + } + if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) { if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types) || /* For XL710 */ I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) /* For XXV710 */ @@ -8751,9 +9384,9 @@ i40e_configure_registers(struct i40e_hw *hw) ret = i40e_aq_debug_write_register(hw, reg_table[i].addr, reg_table[i].val, NULL); if (ret < 0) { - PMD_DRV_LOG(ERR, "Failed to write 0x%"PRIx64" to the " - "address of 0x%"PRIx32, reg_table[i].val, - reg_table[i].addr); + PMD_DRV_LOG(ERR, + "Failed to write 0x%"PRIx64" to the address of 0x%"PRIx32, + reg_table[i].val, reg_table[i].addr); break; } PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of " @@ -8798,8 +9431,9 @@ i40e_config_qinq(struct i40e_hw *hw, 
struct i40e_vsi *vsi) I40E_VSI_L2TAGSTXVALID( vsi->vsi_id), reg, NULL); if (ret < 0) { - PMD_DRV_LOG(ERR, "Failed to update " - "VSI_L2TAGSTXVALID[%d]", vsi->vsi_id); + PMD_DRV_LOG(ERR, + "Failed to update VSI_L2TAGSTXVALID[%d]", + vsi->vsi_id); return I40E_ERR_CONFIG; } } @@ -8850,11 +9484,10 @@ i40e_aq_add_mirror_rule(struct i40e_hw *hw, rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd)); status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL); - PMD_DRV_LOG(INFO, "i40e_aq_add_mirror_rule, aq_status %d," - "rule_id = %u" - " mirror_rules_used = %u, mirror_rules_free = %u,", - hw->aq.asq_last_status, resp->rule_id, - resp->mirror_rules_used, resp->mirror_rules_free); + PMD_DRV_LOG(INFO, + "i40e_aq_add_mirror_rule, aq_status %d, rule_id = %u mirror_rules_used = %u, mirror_rules_free = %u,", + hw->aq.asq_last_status, resp->rule_id, + resp->mirror_rules_used, resp->mirror_rules_free); *rule_id = rte_le_to_cpu_16(resp->rule_id); return status; @@ -8932,8 +9565,8 @@ i40e_mirror_rule_set(struct rte_eth_dev *dev, PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id); if (pf->main_vsi->veb == NULL || pf->vfs == NULL) { - PMD_DRV_LOG(ERR, "mirror rule can not be configured" - " without veb or vfs."); + PMD_DRV_LOG(ERR, + "mirror rule can not be configured without veb or vfs."); return -ENOSYS; } if (pf->nb_mirror_rule > I40E_MAX_MIRROR_RULES) { @@ -8965,9 +9598,9 @@ i40e_mirror_rule_set(struct rte_eth_dev *dev, mirr_rule->entries, mirr_rule->num_entries, mirr_rule->id); if (ret < 0) { - PMD_DRV_LOG(ERR, "failed to remove mirror rule:" - " ret = %d, aq_err = %d.", - ret, hw->aq.asq_last_status); + PMD_DRV_LOG(ERR, + "failed to remove mirror rule: ret = %d, aq_err = %d.", + ret, hw->aq.asq_last_status); return -ENOSYS; } TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules); @@ -9056,9 +9689,9 @@ i40e_mirror_rule_set(struct rte_eth_dev *dev, mirr_rule->rule_type, mirr_rule->entries, j, &rule_id); if (ret < 0) { - PMD_DRV_LOG(ERR, "failed to add mirror rule:" - " ret = %d, aq_err = %d.", - ret, hw->aq.asq_last_status); + PMD_DRV_LOG(ERR, + "failed to add mirror rule: ret = %d, aq_err = %d.", + ret, hw->aq.asq_last_status); rte_free(mirr_rule); return -ENOSYS; } @@ -9110,9 +9743,9 @@ i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id) mirr_rule->entries, mirr_rule->num_entries, mirr_rule->id); if (ret < 0) { - PMD_DRV_LOG(ERR, "failed to remove mirror rule:" - " status = %d, aq_err = %d.", - ret, hw->aq.asq_last_status); + PMD_DRV_LOG(ERR, + "failed to remove mirror rule: status = %d, aq_err = %d.", + ret, hw->aq.asq_last_status); return -ENOSYS; } TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules); @@ -9544,9 +10177,9 @@ i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map) ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid, &veb_bw, NULL); if (ret) { - PMD_INIT_LOG(ERR, "AQ command Config switch_comp BW allocation" - " per TC failed = %d", - hw->aq.asq_last_status); + PMD_INIT_LOG(ERR, + "AQ command Config switch_comp BW allocation per TC failed = %d", + hw->aq.asq_last_status); return ret; } @@ -9554,16 +10187,18 @@ i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map) ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid, &ets_query, NULL); if (ret != I40E_SUCCESS) { - PMD_DRV_LOG(ERR, "Failed to get switch_comp ETS" - " configuration %u", hw->aq.asq_last_status); + PMD_DRV_LOG(ERR, + "Failed to get switch_comp ETS configuration %u", + hw->aq.asq_last_status); return ret; } memset(&bw_query, 0, sizeof(bw_query)); ret = 
i40e_aq_query_switch_comp_bw_config(hw, veb->seid, &bw_query, NULL); if (ret != I40E_SUCCESS) { - PMD_DRV_LOG(ERR, "Failed to get switch_comp bandwidth" - " configuration %u", hw->aq.asq_last_status); + PMD_DRV_LOG(ERR, + "Failed to get switch_comp bandwidth configuration %u", + hw->aq.asq_last_status); return ret; } @@ -9628,8 +10263,8 @@ i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map) } ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL); if (ret) { - PMD_INIT_LOG(ERR, "AQ command Config VSI BW allocation" - " per TC failed = %d", + PMD_INIT_LOG(ERR, + "AQ command Config VSI BW allocation per TC failed = %d", hw->aq.asq_last_status); goto out; } @@ -9650,15 +10285,14 @@ i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map) /* Update the VSI after updating the VSI queue-mapping information */ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (ret) { - PMD_INIT_LOG(ERR, "Failed to configure " - "TC queue mapping = %d", - hw->aq.asq_last_status); + PMD_INIT_LOG(ERR, "Failed to configure TC queue mapping = %d", + hw->aq.asq_last_status); goto out; } /* update the local VSI info with updated queue map */ - (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping, + rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping, sizeof(vsi->info.tc_mapping)); - (void)rte_memcpy(&vsi->info.queue_mapping, + rte_memcpy(&vsi->info.queue_mapping, &ctxt.info.queue_mapping, sizeof(vsi->info.queue_mapping)); vsi->info.mapping_flags = ctxt.info.mapping_flags; @@ -9704,8 +10338,8 @@ i40e_dcb_hw_configure(struct i40e_pf *pf, /* Use the FW API if FW > v4.4*/ if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) || (hw->aq.fw_maj_ver >= 5))) { - PMD_INIT_LOG(ERR, "FW < v4.4, can not use FW LLDP API" - " to configure DCB"); + PMD_INIT_LOG(ERR, + "FW < v4.4, can not use FW LLDP API to configure DCB"); return I40E_ERR_FIRMWARE_API_VERSION; } @@ -9720,8 +10354,7 @@ i40e_dcb_hw_configure(struct i40e_pf *pf, old_cfg->etsrec = old_cfg->etscfg; ret = i40e_set_dcb_config(hw); if (ret) { - PMD_INIT_LOG(ERR, - "Set DCB Config failed, err %s aq_err %s\n", + PMD_INIT_LOG(ERR, "Set DCB Config failed, err %s aq_err %s", i40e_stat_str(hw, ret), i40e_aq_str(hw, hw->aq.asq_last_status)); return ret; @@ -9753,7 +10386,7 @@ i40e_dcb_hw_configure(struct i40e_pf *pf, ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map); if (ret) PMD_INIT_LOG(WARNING, - "Failed configuring TC for VEB seid=%d\n", + "Failed configuring TC for VEB seid=%d", main_vsi->veb->seid); } /* Update each VSI */ @@ -9771,8 +10404,8 @@ i40e_dcb_hw_configure(struct i40e_pf *pf, I40E_DEFAULT_TCMAP); if (ret) PMD_INIT_LOG(WARNING, - "Failed configuring TC for VSI seid=%d\n", - vsi_list->vsi->seid); + "Failed configuring TC for VSI seid=%d", + vsi_list->vsi->seid); /* continue */ } } @@ -9791,7 +10424,7 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - int ret = 0; + int i, ret = 0; if ((pf->flags & I40E_FLAG_DCB) == 0) { PMD_INIT_LOG(ERR, "HW doesn't support DCB"); @@ -9818,6 +10451,9 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb) hw->local_dcbx_config.etscfg.tcbwtable[0] = 100; hw->local_dcbx_config.etscfg.tsatable[0] = I40E_IEEE_TSA_ETS; + /* all UPs mapping to TC0 */ + for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) + hw->local_dcbx_config.etscfg.prioritytable[i] = 0; hw->local_dcbx_config.etsrec = hw->local_dcbx_config.etscfg; 
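	/* Editor's note (not part of the patch): the statements above build the
	 * software-managed default DCBX configuration used on the sw_dcb path:
	 * a single traffic class (TC0) owns 100% of the ETS bandwidth with the
	 * ETS TSA, the newly added loop maps all user priorities 0-7 to TC0
	 * through prioritytable[], and the recommended config (etsrec) simply
	 * mirrors the operational config (etscfg) before it is applied with
	 * i40e_set_dcb_config() below.
	 */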
hw->local_dcbx_config.pfc.willing = 0; @@ -9832,15 +10468,15 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb) I40E_APP_PROTOID_FCOE; ret = i40e_set_dcb_config(hw); if (ret) { - PMD_INIT_LOG(ERR, "default dcb config fails." - " err = %d, aq_err = %d.", ret, - hw->aq.asq_last_status); + PMD_INIT_LOG(ERR, + "default dcb config fails. err = %d, aq_err = %d.", + ret, hw->aq.asq_last_status); return -ENOSYS; } } else { - PMD_INIT_LOG(ERR, "DCB initialization in FW fails," - " err = %d, aq_err = %d.", ret, - hw->aq.asq_last_status); + PMD_INIT_LOG(ERR, + "DCB initialization in FW fails, err = %d, aq_err = %d.", + ret, hw->aq.asq_last_status); return -ENOTSUP; } } else { @@ -9851,14 +10487,14 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb) ret = i40e_init_dcb(hw); if (!ret) { if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) { - PMD_INIT_LOG(ERR, "HW doesn't support" - " DCBX offload."); + PMD_INIT_LOG(ERR, + "HW doesn't support DCBX offload."); return -ENOTSUP; } } else { - PMD_INIT_LOG(ERR, "DCBX configuration failed, err = %d," - " aq_err = %d.", ret, - hw->aq.asq_last_status); + PMD_INIT_LOG(ERR, + "DCBX configuration failed, err = %d, aq_err = %d.", + ret, hw->aq.asq_last_status); return -ENOTSUP; } } @@ -9967,7 +10603,7 @@ i40e_dev_get_dcb_info(struct rte_eth_dev *dev, static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) { - struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint16_t interval = @@ -10001,7 +10637,7 @@ i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) { - struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint16_t msix_intr; @@ -10124,8 +10760,7 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct rte_eth_dev_data *dev_data = pf->dev_data; - uint32_t frame_size = mtu + ETHER_HDR_LEN - + ETHER_CRC_LEN + I40E_VLAN_TAG_SIZE; + uint32_t frame_size = mtu + I40E_ETH_OVERHEAD; int ret = 0; /* check if mtu is within the allowed range */ @@ -10134,8 +10769,7 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) /* mtu setting is forbidden if port is start */ if (dev_data->dev_started) { - PMD_DRV_LOG(ERR, - "port %d must be stopped before configuration\n", + PMD_DRV_LOG(ERR, "port %d must be stopped before configuration", dev_data->port_id); return -EBUSY; } @@ -10178,7 +10812,7 @@ i40e_ethertype_filter_restore(struct i40e_pf *pf) } PMD_DRV_LOG(INFO, "Ethertype filter:" " mac_etype_used = %u, etype_used = %u," - " mac_etype_free = %u, etype_free = %u\n", + " mac_etype_free = %u, etype_free = %u", stats.mac_etype_used, stats.etype_used, stats.mac_etype_free, stats.etype_free); } @@ -10188,17 +10822,51 @@ static void i40e_tunnel_filter_restore(struct i40e_pf *pf) { struct i40e_hw *hw = I40E_PF_TO_HW(pf); - struct i40e_vsi *vsi = pf->main_vsi; + struct i40e_vsi *vsi; + struct i40e_pf_vf *vf; struct i40e_tunnel_filter_list *tunnel_list = &pf->tunnel.tunnel_list; struct i40e_tunnel_filter *f; - struct i40e_aqc_add_remove_cloud_filters_element_data 
cld_filter; + struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter; + bool big_buffer = 0; TAILQ_FOREACH(f, tunnel_list, rules) { + if (!f->is_to_vf) + vsi = pf->main_vsi; + else { + vf = &pf->vfs[f->vf_id]; + vsi = vf->vsi; + } memset(&cld_filter, 0, sizeof(cld_filter)); - rte_memcpy(&cld_filter, &f->input, sizeof(f->input)); - cld_filter.queue_number = f->queue; - i40e_aq_add_cloud_filters(hw, vsi->seid, &cld_filter, 1); + ether_addr_copy((struct ether_addr *)&f->input.outer_mac, + (struct ether_addr *)&cld_filter.element.outer_mac); + ether_addr_copy((struct ether_addr *)&f->input.inner_mac, + (struct ether_addr *)&cld_filter.element.inner_mac); + cld_filter.element.inner_vlan = f->input.inner_vlan; + cld_filter.element.flags = f->input.flags; + cld_filter.element.tenant_id = f->input.tenant_id; + cld_filter.element.queue_number = f->queue; + rte_memcpy(cld_filter.general_fields, + f->input.general_fields, + sizeof(f->input.general_fields)); + + if (((f->input.flags & + I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) == + I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) || + ((f->input.flags & + I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) == + I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) || + ((f->input.flags & + I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) == + I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ)) + big_buffer = 1; + + if (big_buffer) + i40e_aq_add_cloud_filters_big_buffer(hw, + vsi->seid, &cld_filter, 1); + else + i40e_aq_add_cloud_filters(hw, vsi->seid, + &cld_filter.element, 1); } } @@ -10209,3 +10877,135 @@ i40e_filter_restore(struct i40e_pf *pf) i40e_tunnel_filter_restore(pf); i40e_fdir_filter_restore(pf); } + +static bool +is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv) +{ + if (strcmp(dev->device->driver->name, drv->driver.name)) + return false; + + return true; +} + +bool +is_i40e_supported(struct rte_eth_dev *dev) +{ + return is_device_supported(dev, &rte_i40e_pmd); +} + +/* Create a QinQ cloud filter + * + * The Fortville NIC has limited resources for tunnel filters, + * so we can only reuse existing filters. + * + * In step 1 we define which Field Vector fields can be used for + * filter types. + * As we do not have the inner tag defined as a field, + * we have to define it first, by reusing one of L1 entries. + * + * In step 2 we are replacing one of existing filter types with + * a new one for QinQ. + * As we reusing L1 and replacing L2, some of the default filter + * types will disappear,which depends on L1 and L2 entries we reuse. + * + * Step 1: Create L1 filter of outer vlan (12b) + inner vlan (12b) + * + * 1. Create L1 filter of outer vlan (12b) which will be in use + * later when we define the cloud filter. + * a. Valid_flags.replace_cloud = 0 + * b. Old_filter = 10 (Stag_Inner_Vlan) + * c. New_filter = 0x10 + * d. TR bit = 0xff (optional, not used here) + * e. Buffer – 2 entries: + * i. Byte 0 = 8 (outer vlan FV index). + * Byte 1 = 0 (rsv) + * Byte 2-3 = 0x0fff + * ii. Byte 0 = 37 (inner vlan FV index). + * Byte 1 =0 (rsv) + * Byte 2-3 = 0x0fff + * + * Step 2: + * 2. Create cloud filter using two L1 filters entries: stag and + * new filter(outer vlan+ inner vlan) + * a. Valid_flags.replace_cloud = 1 + * b. Old_filter = 1 (instead of outer IP) + * c. New_filter = 0x10 + * d. Buffer – 2 entries: + * i. Byte 0 = 0x80 | 7 (valid | Stag). + * Byte 1-3 = 0 (rsv) + * ii. 
Byte 8 = 0x80 | 0x10 (valid | new l1 filter step1) + * Byte 9-11 = 0 (rsv) + */ +static int +i40e_cloud_filter_qinq_create(struct i40e_pf *pf) +{ + int ret = -ENOTSUP; + struct i40e_aqc_replace_cloud_filters_cmd filter_replace; + struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf; + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + + /* Init */ + memset(&filter_replace, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); + memset(&filter_replace_buf, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf)); + + /* create L1 filter */ + filter_replace.old_filter_type = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN; + filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ; + filter_replace.tr_bit = 0; + + /* Prepare the buffer, 2 entries */ + filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN; + filter_replace_buf.data[0] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + /* Field Vector 12b mask */ + filter_replace_buf.data[2] = 0xff; + filter_replace_buf.data[3] = 0x0f; + filter_replace_buf.data[4] = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN; + filter_replace_buf.data[4] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + /* Field Vector 12b mask */ + filter_replace_buf.data[6] = 0xff; + filter_replace_buf.data[7] = 0x0f; + ret = i40e_aq_replace_cloud_filters(hw, &filter_replace, + &filter_replace_buf); + if (ret != I40E_SUCCESS) + return ret; + + /* Apply the second L2 cloud filter */ + memset(&filter_replace, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); + memset(&filter_replace_buf, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf)); + + /* create L2 filter, input for L2 filter will be L1 filter */ + filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER; + filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP; + filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ; + + /* Prepare the buffer, 2 entries */ + filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG; + filter_replace_buf.data[0] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ; + filter_replace_buf.data[4] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + ret = i40e_aq_replace_cloud_filters(hw, &filter_replace, + &filter_replace_buf); + return ret; +} + +RTE_INIT(i40e_init_log); +static void +i40e_init_log(void) +{ + i40e_logtype_init = rte_log_register("pmd.i40e.init"); + if (i40e_logtype_init >= 0) + rte_log_set_level(i40e_logtype_init, RTE_LOG_NOTICE); + i40e_logtype_driver = rte_log_register("pmd.i40e.driver"); + if (i40e_logtype_driver >= 0) + rte_log_set_level(i40e_logtype_driver, RTE_LOG_NOTICE); +}
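Editorial appendix (not part of the patch): a worked example of how the MPLSoUDP/MPLSoGRE branches of i40e_dev_consistent_tunnel_filter_set() above split the 20-bit MPLS label carried in tenant_id across the big-buffer general_fields words. It assumes a little-endian host, where rte_cpu_to_le_32() is a no-op; the local variable names are illustrative only.

/* Label 0x12345 programmed through the MPLSoUDP branch. */
uint32_t teid_le = 0x12345;             /* rte_cpu_to_le_32(tenant_id), no-op on LE */
uint16_t word0 = teid_le >> 4;          /* 0x1234: upper 16 bits of the 20-bit label */
uint16_t word1 = (teid_le & 0xF) << 12; /* 0x5000: low nibble placed in the top 4 bits */
uint16_t word2 = 0x40;                  /* 0x40 in the MPLSoUDP case, 0x0 for MPLSoGRE */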
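A second editorial sketch, again not part of the patch: how a caller might exercise the new QinQ path of i40e_dev_consistent_tunnel_filter_set(). Field names follow the i40e_tunnel_filter_conf usage visible in this diff; the helper name and the VLAN/queue values are made up for illustration.

/* Hypothetical helper: steer frames carrying S-tag 'outer' and C-tag 'inner'
 * to RX queue 'queue' on the PF main VSI.
 */
static int
example_add_qinq_filter(struct i40e_pf *pf, uint16_t outer, uint16_t inner,
			uint16_t queue)
{
	struct i40e_tunnel_filter_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.tunnel_type = I40E_TUNNEL_TYPE_QINQ;
	conf.outer_vlan = outer;   /* copied into general_fields[1] */
	conf.inner_vlan = inner;   /* copied into general_fields[0] */
	conf.queue_id = queue;
	conf.is_to_vf = 0;         /* 0: PF main VSI, 1: pf->vfs[vf_id].vsi */

	/* The first QinQ rule triggers i40e_cloud_filter_qinq_create() once
	 * (guarded by pf->qinq_replace_flag) to replace the L1 and cloud
	 * filter types, then the rule is programmed via the big-buffer AQ
	 * command. Returns 0 on success, -EINVAL on conflicting or missing
	 * rules, -ENOTSUP if the AQ command fails.
	 */
	return i40e_dev_consistent_tunnel_filter_set(pf, &conf, 1);
}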