diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 1afddeb1f3..a2e35ebcfe 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -17,8 +17,8 @@
 #include
 #include
 #include
-#include
-#include
+#include
+#include
 #include
 #include
 #include
@@ -26,6 +26,8 @@
 #include
 #include
 #include
+#include
+#include

 #include "i40e_logs.h"
 #include "base/i40e_prototype.h"
@@ -38,15 +40,17 @@
 #include "i40e_pf.h"
 #include "i40e_regs.h"
 #include "rte_pmd_i40e.h"
+#include "i40e_hash.h"

 #define ETH_I40E_FLOATING_VEB_ARG	"enable_floating_veb"
 #define ETH_I40E_FLOATING_VEB_LIST_ARG	"floating_veb_list"
 #define ETH_I40E_SUPPORT_MULTI_DRIVER	"support-multi-driver"
 #define ETH_I40E_QUEUE_NUM_PER_VF_ARG	"queue-num-per-vf"
-#define ETH_I40E_USE_LATEST_VEC	"use-latest-supported-vec"
 #define ETH_I40E_VF_MSG_CFG		"vf_msg_cfg"

 #define I40E_CLEAR_PXE_WAIT_MS     200
+#define I40E_VSI_TSR_QINQ_STRIP		0x4010
+#define I40E_VSI_TSR(_i)	(0x00050800 + ((_i) * 4))

 /* Maximun number of capability elements */
 #define I40E_MAX_CAP_ELE_NUM       128
@@ -199,12 +203,12 @@
 #define I40E_TRANSLATE_INSET 0
 #define I40E_TRANSLATE_REG   1

-#define I40E_INSET_IPV4_TOS_MASK        0x0009FF00UL
-#define I40E_INSET_IPv4_TTL_MASK        0x000D00FFUL
-#define I40E_INSET_IPV4_PROTO_MASK      0x000DFF00UL
-#define I40E_INSET_IPV6_TC_MASK         0x0009F00FUL
-#define I40E_INSET_IPV6_HOP_LIMIT_MASK  0x000CFF00UL
-#define I40E_INSET_IPV6_NEXT_HDR_MASK   0x000C00FFUL
+#define I40E_INSET_IPV4_TOS_MASK        0x0000FF00UL
+#define I40E_INSET_IPV4_TTL_MASK        0x000000FFUL
+#define I40E_INSET_IPV4_PROTO_MASK      0x0000FF00UL
+#define I40E_INSET_IPV6_TC_MASK         0x0000F00FUL
+#define I40E_INSET_IPV6_HOP_LIMIT_MASK  0x0000FF00UL
+#define I40E_INSET_IPV6_NEXT_HDR_MASK   0x000000FFUL

 /* PCI offset for querying capability */
 #define PCI_DEV_CAP_REG            0xA4
@@ -217,12 +221,31 @@
 /* Bit mask of Extended Tag enable/disable */
 #define PCI_DEV_CTRL_EXT_TAG_MASK  (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)

+#define I40E_GLQF_PIT_IPV4_START	2
+#define I40E_GLQF_PIT_IPV4_COUNT	2
+#define I40E_GLQF_PIT_IPV6_START	4
+#define I40E_GLQF_PIT_IPV6_COUNT	2
+
+#define I40E_GLQF_PIT_SOURCE_OFF_GET(a)	\
+				(((a) & I40E_GLQF_PIT_SOURCE_OFF_MASK) >> \
+				 I40E_GLQF_PIT_SOURCE_OFF_SHIFT)
+
+#define I40E_GLQF_PIT_DEST_OFF_GET(a) \
+				(((a) & I40E_GLQF_PIT_DEST_OFF_MASK) >> \
+				 I40E_GLQF_PIT_DEST_OFF_SHIFT)
+
+#define I40E_GLQF_PIT_FSIZE_GET(a)	(((a) & I40E_GLQF_PIT_FSIZE_MASK) >> \
+					 I40E_GLQF_PIT_FSIZE_SHIFT)
+
+#define I40E_GLQF_PIT_BUILD(off, mask)	(((off) << 16) | (mask))
+#define I40E_FDIR_FIELD_OFFSET(a)	((a) >> 1)
+
 static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
 static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
 static int i40e_dev_configure(struct rte_eth_dev *dev);
 static int i40e_dev_start(struct rte_eth_dev *dev);
-static void i40e_dev_stop(struct rte_eth_dev *dev);
-static void i40e_dev_close(struct rte_eth_dev *dev);
+static int i40e_dev_stop(struct rte_eth_dev *dev);
+static int i40e_dev_close(struct rte_eth_dev *dev);
 static int i40e_dev_reset(struct rte_eth_dev *dev);
 static int i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
 static int i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
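The new I40E_GLQF_PIT accessors above are the usual mask-and-shift helpers for packed register fields, and I40E_GLQF_PIT_BUILD packs an offset into the upper half-word and a mask into the lower one. A minimal stand-alone sketch of the same idiom follows; the PIT_* field layout below is an illustrative placeholder, not the real GLQF_PIT register map:

#include <stdint.h>
#include <stdio.h>

/* hypothetical layout: 6-bit source offset in bits 16-21, 16-bit mask in bits 0-15 */
#define PIT_SOURCE_OFF_SHIFT	16
#define PIT_SOURCE_OFF_MASK	(0x3FUL << PIT_SOURCE_OFF_SHIFT)
#define PIT_MASK_FIELD		0xFFFFUL

#define PIT_BUILD(off, mask)	(((off) << 16) | (mask))
#define PIT_SOURCE_OFF_GET(a)	(((a) & PIT_SOURCE_OFF_MASK) >> PIT_SOURCE_OFF_SHIFT)

int main(void)
{
	/* pack source offset 5 with mask 0xff, then unpack to verify the round trip */
	unsigned long reg = PIT_BUILD(5UL, 0xFFUL);

	printf("off=%lu mask=0x%lx\n",
	       PIT_SOURCE_OFF_GET(reg), reg & PIT_MASK_FIELD);
	return 0;
}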
@@ -301,7 +324,6 @@ static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
 static int i40e_veb_release(struct i40e_veb *veb);
 static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
 						struct i40e_vsi *vsi);
-static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
 static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
 static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
 					     struct i40e_macvlan_filter *mv_f,
@@ -317,29 +339,14 @@ static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 					struct rte_eth_udp_tunnel *udp_tunnel);
 static void i40e_filter_input_set_init(struct i40e_pf *pf);
-static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
-				enum rte_filter_op filter_op,
-				void *arg);
-static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
-				enum rte_filter_type filter_type,
-				enum rte_filter_op filter_op,
-				void *arg);
+static int i40e_dev_flow_ops_get(struct rte_eth_dev *dev,
+				 const struct rte_flow_ops **ops);
 static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
 				 struct rte_eth_dcb_info *dcb_info);
 static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
 static void i40e_configure_registers(struct i40e_hw *hw);
 static void i40e_hw_init(struct rte_eth_dev *dev);
 static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
-static enum i40e_status_code i40e_aq_del_mirror_rule(struct i40e_hw *hw,
-						     uint16_t seid,
-						     uint16_t rule_type,
-						     uint16_t *entries,
-						     uint16_t count,
-						     uint16_t rule_id);
-static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
-			struct rte_eth_mirror_conf *mirror_conf,
-			uint8_t sw_id, uint8_t on);
-static int i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id);

 static int i40e_timesync_enable(struct rte_eth_dev *dev);
 static int i40e_timesync_disable(struct rte_eth_dev *dev);
@@ -398,24 +405,11 @@ static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
 static void i40e_filter_restore(struct i40e_pf *pf);
 static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev);

-int i40e_logtype_init;
-int i40e_logtype_driver;
-#ifdef RTE_LIBRTE_I40E_DEBUG_RX
-int i40e_logtype_rx;
-#endif
-#ifdef RTE_LIBRTE_I40E_DEBUG_TX
-int i40e_logtype_tx;
-#endif
-#ifdef RTE_LIBRTE_I40E_DEBUG_TX_FREE
-int i40e_logtype_tx_free;
-#endif
-
 static const char *const valid_keys[] = {
 	ETH_I40E_FLOATING_VEB_ARG,
 	ETH_I40E_FLOATING_VEB_LIST_ARG,
 	ETH_I40E_SUPPORT_MULTI_DRIVER,
 	ETH_I40E_QUEUE_NUM_PER_VF_ARG,
-	ETH_I40E_USE_LATEST_VEC,
 	ETH_I40E_VF_MSG_CFG,
 	NULL};

@@ -443,6 +437,9 @@ static const struct rte_pci_id pci_id_i40e_map[] = {
 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X710_N3000) },
 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_XXV710_N3000) },
 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_BC) },
+	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_5G_BASE_T_BC) },
+	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_B) },
+	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_SFP) },
 	{ .vendor_id = 0, /* sentinel */ },
 };

@@ -480,10 +477,6 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
 	.rx_queue_intr_enable         = i40e_dev_rx_queue_intr_enable,
 	.rx_queue_intr_disable        = i40e_dev_rx_queue_intr_disable,
 	.rx_queue_release             = i40e_dev_rx_queue_release,
-	.rx_queue_count               = i40e_dev_rx_queue_count,
-	.rx_descriptor_done           = i40e_dev_rx_descriptor_done,
-	.rx_descriptor_status         = i40e_dev_rx_descriptor_status,
-	.tx_descriptor_status         = i40e_dev_tx_descriptor_status,
 	.tx_queue_setup               = i40e_dev_tx_queue_setup,
 	.tx_queue_release             = i40e_dev_tx_queue_release,
 	.dev_led_on                   = i40e_dev_led_on,
@@ -499,13 +492,11 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
 	.rss_hash_conf_get            = i40e_dev_rss_hash_conf_get,
 	.udp_tunnel_port_add          = i40e_dev_udp_tunnel_port_add,
 	.udp_tunnel_port_del          = i40e_dev_udp_tunnel_port_del,
-	.filter_ctrl                  = i40e_dev_filter_ctrl,
+	.flow_ops_get                 = i40e_dev_flow_ops_get,
 	.rxq_info_get                 = i40e_rxq_info_get,
 	.txq_info_get                 = i40e_txq_info_get,
 	.rx_burst_mode_get            = i40e_rx_burst_mode_get,
 	.tx_burst_mode_get            = i40e_tx_burst_mode_get,
-	.mirror_rule_set              = i40e_mirror_rule_set,
-	.mirror_rule_reset            = i40e_mirror_rule_reset,
 	.timesync_enable              = i40e_timesync_enable,
 	.timesync_disable             = i40e_timesync_disable,
 	.timesync_read_rx_timestamp   = i40e_timesync_read_rx_timestamp,
@@ -522,6 +513,8 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
 	.mac_addr_set                 = i40e_set_default_mac_addr,
 	.mtu_set                      = i40e_dev_mtu_set,
 	.tm_ops_get                   = i40e_tm_ops_get,
+	.tx_done_cleanup              = i40e_tx_done_cleanup,
+	.get_monitor_addr             = i40e_get_monitor_addr,
 };

 /* store statistics names and its offset in stats structure */
@@ -652,6 +645,13 @@ eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 		return retval;
 	}

+	if (eth_da.nb_representor_ports > 0 &&
+	    eth_da.type != RTE_ETH_REPRESENTOR_VF) {
+		PMD_DRV_LOG(ERR, "unsupported representor type: %s\n",
+			    pci_dev->device.devargs->args);
+		return -ENOTSUP;
+	}
+
 	retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
 		sizeof(struct i40e_adapter),
 		eth_dev_pci_specific_init, pci_dev,
@@ -720,10 +720,11 @@ i40e_write_global_rx_ctl(struct i40e_hw *hw, uint32_t reg_addr,
 			 uint32_t reg_val)
 {
 	uint32_t ori_reg_val;
-	struct rte_eth_dev *dev;
+	struct rte_eth_dev_data *dev_data =
+		((struct i40e_adapter *)hw->back)->pf.dev_data;
+	struct rte_eth_dev *dev = &rte_eth_devices[dev_data->port_id];

 	ori_reg_val = i40e_read_rx_ctl(hw, reg_addr);
-	dev = ((struct i40e_adapter *)hw->back)->eth_dev;
 	i40e_write_rx_ctl(hw, reg_addr, reg_val);
 	if (ori_reg_val != reg_val)
 		PMD_DRV_LOG(WARNING,
@@ -775,6 +776,21 @@ static inline void i40e_config_automask(struct i40e_pf *pf)
 	I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
 }

+static inline void i40e_clear_automask(struct i40e_pf *pf)
+{
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	uint32_t val;
+
+	val = I40E_READ_REG(hw, I40E_GLINT_CTL);
+	val &= ~(I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
+		 I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK);
+
+	if (!pf->support_multi_driver)
+		val &= ~I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK;
+
+	I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
+}
+
 #define I40E_FLOW_CONTROL_ETHERTYPE  0x8808

 /*
@@ -827,6 +843,8 @@ floating_veb_list_handler(__rte_unused const char *key,
 		idx = strtoul(floating_veb_value, &end, 10);
 		if (errno || end == NULL)
 			return -1;
+		if (idx < 0)
+			return -1;
 		while (isblank(*end))
 			end++;
 		if (*end == '-') {
@@ -1053,8 +1071,16 @@ static int
 i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
 {
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
 	struct i40e_fdir_info *fdir_info = &pf->fdir;
 	char fdir_hash_name[RTE_HASH_NAMESIZE];
+	uint32_t alloc = hw->func_caps.fd_filters_guaranteed;
+	uint32_t best = hw->func_caps.fd_filters_best_effort;
+	enum i40e_filter_pctype pctype;
+	struct rte_bitmap *bmp = NULL;
+	uint32_t bmp_size;
+	void *mem = NULL;
+	uint32_t i = 0;
 	int ret;

 	struct rte_hash_parameters fdir_hash_params = {
@@ -1075,6 +1101,7 @@ i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
 		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
 		return -EINVAL;
 	}
+
 	fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
 					  sizeof(struct i40e_fdir_filter *) *
 					  I40E_MAX_FDIR_FILTER_NUM,
@@ -1085,8 +1112,78 @@ i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
 		ret = -ENOMEM;
 		goto err_fdir_hash_map_alloc;
 	}
+
+	fdir_info->fdir_filter_array = rte_zmalloc("fdir_filter",
+			sizeof(struct i40e_fdir_filter) *
+			I40E_MAX_FDIR_FILTER_NUM,
+			0);
+
+	if (!fdir_info->fdir_filter_array) {
+		PMD_INIT_LOG(ERR,
+			     "Failed to allocate memory for fdir filter array!");
+		ret = -ENOMEM;
+		goto err_fdir_filter_array_alloc;
+	}
+
+	for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+	     pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++)
+		pf->fdir.flow_count[pctype] = 0;
+
+	fdir_info->fdir_space_size = alloc + best;
+	fdir_info->fdir_actual_cnt = 0;
+	fdir_info->fdir_guarantee_total_space = alloc;
+	fdir_info->fdir_guarantee_free_space =
+		fdir_info->fdir_guarantee_total_space;
+
+	PMD_DRV_LOG(INFO, "FDIR guarantee space: %u, best_effort space %u.", alloc, best);
+
+	fdir_info->fdir_flow_pool.pool =
+			rte_zmalloc("i40e_fdir_entry",
+				    sizeof(struct i40e_fdir_entry) *
+				    fdir_info->fdir_space_size,
+				    0);
+
+	if (!fdir_info->fdir_flow_pool.pool) {
+		PMD_INIT_LOG(ERR,
+			     "Failed to allocate memory for bitmap flow!");
+		ret = -ENOMEM;
+		goto err_fdir_bitmap_flow_alloc;
+	}
+
+	for (i = 0; i < fdir_info->fdir_space_size; i++)
+		fdir_info->fdir_flow_pool.pool[i].idx = i;
+
+	bmp_size =
+		rte_bitmap_get_memory_footprint(fdir_info->fdir_space_size);
+	mem = rte_zmalloc("fdir_bmap", bmp_size, RTE_CACHE_LINE_SIZE);
+	if (mem == NULL) {
+		PMD_INIT_LOG(ERR,
+			     "Failed to allocate memory for fdir bitmap!");
+		ret = -ENOMEM;
+		goto err_fdir_mem_alloc;
+	}
+	bmp = rte_bitmap_init(fdir_info->fdir_space_size, mem, bmp_size);
+	if (bmp == NULL) {
+		PMD_INIT_LOG(ERR,
+			     "Failed to initialization fdir bitmap!");
+		ret = -ENOMEM;
+		goto err_fdir_bmp_alloc;
+	}
+	for (i = 0; i < fdir_info->fdir_space_size; i++)
+		rte_bitmap_set(bmp, i);
+
+	fdir_info->fdir_flow_pool.bitmap = bmp;
+
 	return 0;
+
+err_fdir_bmp_alloc:
+	rte_free(mem);
+err_fdir_mem_alloc:
+	rte_free(fdir_info->fdir_flow_pool.pool);
+err_fdir_bitmap_flow_alloc:
+	rte_free(fdir_info->fdir_filter_array);
+err_fdir_filter_array_alloc:
+	rte_free(fdir_info->hash_map);
 err_fdir_hash_map_alloc:
 	rte_hash_free(fdir_info->hash_table);

@@ -1106,6 +1203,31 @@ i40e_init_customized_info(struct i40e_pf *pf)
 	}

 	pf->gtp_support = false;
+	pf->esp_support = false;
+}
+
+static void
+i40e_init_filter_invalidation(struct i40e_pf *pf)
+{
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	struct i40e_fdir_info *fdir_info = &pf->fdir;
+	uint32_t glqf_ctl_reg = 0;
+
+	glqf_ctl_reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
+	if (!pf->support_multi_driver) {
+		fdir_info->fdir_invalprio = 1;
+		glqf_ctl_reg |= I40E_GLQF_CTL_INVALPRIO_MASK;
+		PMD_DRV_LOG(INFO, "FDIR INVALPRIO set to guaranteed first");
+		i40e_write_rx_ctl(hw, I40E_GLQF_CTL, glqf_ctl_reg);
+	} else {
+		if (glqf_ctl_reg & I40E_GLQF_CTL_INVALPRIO_MASK) {
+			fdir_info->fdir_invalprio = 1;
+			PMD_DRV_LOG(INFO, "FDIR INVALPRIO is: guaranteed first");
+		} else {
+			fdir_info->fdir_invalprio = 0;
+			PMD_DRV_LOG(INFO, "FDIR INVALPRIO is: shared first");
+		}
+	}
 }

 void
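The FDIR setup above preallocates every filter entry up front and tracks free slots in an rte_bitmap, so adding or removing a rule never touches the allocator. A minimal sketch of the same free-slot scheme, with a plain uint64_t word standing in for rte_bitmap and all names hypothetical:

#include <stdint.h>
#include <stdio.h>

#define POOL_SIZE 64	/* guaranteed + best-effort slots, one bit each */

struct fdir_entry { uint32_t idx; };

static struct fdir_entry pool[POOL_SIZE];
static uint64_t free_mask;	/* bit i set => pool[i] is free */

static void pool_init(void)
{
	for (uint32_t i = 0; i < POOL_SIZE; i++)
		pool[i].idx = i;	/* each entry remembers its own slot */
	free_mask = ~0ULL;		/* every slot starts free */
}

static struct fdir_entry *pool_alloc(void)
{
	if (free_mask == 0)
		return NULL;	/* fdir space exhausted */
	/* find the first free bit (GCC/Clang builtin) */
	uint32_t i = (uint32_t)__builtin_ctzll(free_mask);
	free_mask &= ~(1ULL << i);	/* mark as used */
	return &pool[i];
}

static void pool_free(struct fdir_entry *e)
{
	free_mask |= 1ULL << e->idx;	/* return the slot; no free() needed */
}

int main(void)
{
	pool_init();
	struct fdir_entry *e = pool_alloc();
	printf("got slot %u\n", e->idx);
	pool_free(e);
	return 0;
}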
@@ -1193,7 +1315,9 @@ i40e_aq_debug_write_global_register(struct i40e_hw *hw,
 				    struct i40e_asq_cmd_details *cmd_details)
 {
 	uint64_t ori_reg_val;
-	struct rte_eth_dev *dev;
+	struct rte_eth_dev_data *dev_data =
+		((struct i40e_adapter *)hw->back)->pf.dev_data;
+	struct rte_eth_dev *dev = &rte_eth_devices[dev_data->port_id];
 	int ret;

 	ret = i40e_aq_debug_read_register(hw, reg_addr, &ori_reg_val, NULL);
@@ -1203,7 +1327,6 @@ i40e_aq_debug_write_global_register(struct i40e_hw *hw,
 			    reg_addr);
 		return -EIO;
 	}
-	dev = ((struct i40e_adapter *)hw->back)->eth_dev;

 	if (ori_reg_val != reg_val)
 		PMD_DRV_LOG(WARNING,
@@ -1214,62 +1337,6 @@ i40e_aq_debug_write_global_register(struct i40e_hw *hw,
 	return i40e_aq_debug_write_register(hw, reg_addr, reg_val, cmd_details);
 }

-static int
-i40e_parse_latest_vec_handler(__rte_unused const char *key,
-				const char *value,
-				void *opaque)
-{
-	struct i40e_adapter *ad = opaque;
-	int use_latest_vec;
-
-	use_latest_vec = atoi(value);
-
-	if (use_latest_vec != 0 && use_latest_vec != 1)
-		PMD_DRV_LOG(WARNING, "Value should be 0 or 1, set it as 1!");
-
-	ad->use_latest_vec = (uint8_t)use_latest_vec;
-
-	return 0;
-}
-
-static int
-i40e_use_latest_vec(struct rte_eth_dev *dev)
-{
-	struct i40e_adapter *ad =
-		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
-	struct rte_kvargs *kvlist;
-	int kvargs_count;
-
-	ad->use_latest_vec = false;
-
-	if (!dev->device->devargs)
-		return 0;
-
-	kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
-	if (!kvlist)
-		return -EINVAL;
-
-	kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_USE_LATEST_VEC);
-	if (!kvargs_count) {
-		rte_kvargs_free(kvlist);
-		return 0;
-	}
-
-	if (kvargs_count > 1)
-		PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only "
-			    "the first invalid or last valid one is used !",
-			    ETH_I40E_USE_LATEST_VEC);
-
-	if (rte_kvargs_process(kvlist, ETH_I40E_USE_LATEST_VEC,
-			       i40e_parse_latest_vec_handler, ad) < 0) {
-		rte_kvargs_free(kvlist);
-		return -EINVAL;
-	}
-
-	rte_kvargs_free(kvlist);
-	return 0;
-}
-
 static int
 read_vf_msg_config(__rte_unused const char *key,
 		   const char *value,
@@ -1354,6 +1421,9 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
 	PMD_INIT_FUNC_TRACE();

 	dev->dev_ops = &i40e_eth_dev_ops;
+	dev->rx_queue_count = i40e_dev_rx_queue_count;
+	dev->rx_descriptor_status = i40e_dev_rx_descriptor_status;
+	dev->tx_descriptor_status = i40e_dev_tx_descriptor_status;
 	dev->rx_pkt_burst = i40e_recv_pkts;
 	dev->tx_pkt_burst = i40e_xmit_pkts;
 	dev->tx_pkt_prepare = i40e_prep_pkts;
@@ -1371,9 +1441,9 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
 	intr_handle = &pci_dev->intr_handle;

 	rte_eth_copy_pci_info(dev, pci_dev);
+	dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

 	pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
-	pf->adapter->eth_dev = dev;
 	pf->dev_data = dev->data;

 	hw->back = I40E_PF_TO_ADAPTER(pf);
@@ -1415,8 +1485,6 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
 	i40e_parse_vf_msg_config(dev, &pf->vf_msg_cfg);
 	/* Check if need to support multi-driver */
 	i40e_support_multi_driver(dev);
-	/* Check if users want the latest supported vec path */
-	i40e_use_latest_vec(dev);

 	/* Make sure all is clean before doing PF reset */
 	i40e_clear_hw(hw);
@@ -1442,8 +1510,9 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
 		PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
 		return -EIO;
 	}
-	/* Firmware of SFP x722 does not support adminq option */
-	if (hw->device_id == I40E_DEV_ID_SFP_X722)
+	/* Firmware of SFP x722 does not support 802.1ad frames ability */
+	if (hw->device_id == I40E_DEV_ID_SFP_X722 ||
+	    hw->device_id == I40E_DEV_ID_SFP_I_X722)
 		hw->flags &= ~I40E_HW_FLAG_802_1AD_CAPABLE;

 	PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
@@ -1605,11 +1674,6 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
 	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
 			&dev->data->mac_addrs[0]);

-	/* Pass the information to the rte_eth_dev_close() that it should also
-	 * release the private port resources.
-	 */
-	dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
-
 	/* Init dcb to sw mode by default */
 	ret = i40e_dcb_init_configure(dev, TRUE);
 	if (ret != I40E_SUCCESS) {
following fdir config. So when the * legacy filter API is deprecated, the following codes should also be @@ -1841,8 +1921,6 @@ i40e_dev_configure(struct rte_eth_dev *dev) goto err; /* VMDQ setup. - * Needs to move VMDQ setting out of i40e_pf_config_mq_rx() as VMDQ and - * RSS setting have different requirements. * General PMD driver call sequence are NIC init, configure, * rx/tx_queue_setup and dev_start. In rx/tx_queue_setup() function, it * will try to lookup the VSI that specific queue belongs to if VMDQ @@ -1891,7 +1969,7 @@ err: void i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi) { - struct rte_eth_dev *dev = vsi->adapter->eth_dev; + struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi); struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); @@ -2004,10 +2082,10 @@ __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect, I40E_WRITE_FLUSH(hw); } -void +int i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx) { - struct rte_eth_dev *dev = vsi->adapter->eth_dev; + struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi); struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); @@ -2024,10 +2102,14 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx) /* VF bind interrupt */ if (vsi->type == I40E_VSI_SRIOV) { + if (vsi->nb_msix == 0) { + PMD_DRV_LOG(ERR, "No msix resource"); + return -EINVAL; + } __vsi_queues_bind_intr(vsi, msix_vect, vsi->base_queue, vsi->nb_qps, itr_idx); - return; + return 0; } /* PF & VMDq bind interrupt */ @@ -2044,7 +2126,10 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx) } for (i = 0; i < vsi->nb_used_qps; i++) { - if (nb_msix <= 1) { + if (vsi->nb_msix == 0) { + PMD_DRV_LOG(ERR, "No msix resource"); + return -EINVAL; + } else if (nb_msix <= 1) { if (!rte_intr_allow_others(intr_handle)) /* allow to share MISC_VEC_ID */ msix_vect = I40E_MISC_VEC_ID; @@ -2069,12 +2154,14 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx) msix_vect++; nb_msix--; } + + return 0; } -static void +void i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi) { - struct rte_eth_dev *dev = vsi->adapter->eth_dev; + struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi); struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); @@ -2098,10 +2185,10 @@ i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi) I40E_WRITE_FLUSH(hw); } -static void +void i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi) { - struct rte_eth_dev *dev = vsi->adapter->eth_dev; + struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi); struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); @@ -2214,7 +2301,8 @@ i40e_phy_conf_link(struct i40e_hw *hw, phy_conf.phy_type = is_up ? cpu_to_le32(phy_type_mask) : 0; phy_conf.phy_type_ext = is_up ? 
@@ -1816,7 +1895,8 @@ i40e_dev_configure(struct rte_eth_dev *dev)
 	ad->tx_simple_allowed = true;
 	ad->tx_vec_allowed = true;

-	dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;

 	/* Only legacy filter API needs the following fdir config. So when the
 	 * legacy filter API is deprecated, the following codes should also be
@@ -1841,8 +1921,6 @@ i40e_dev_configure(struct rte_eth_dev *dev)
 			goto err;

 		/* VMDQ setup.
-		 *  Needs to move VMDQ setting out of i40e_pf_config_mq_rx() as VMDQ and
-		 *  RSS setting have different requirements.
 		 *  General PMD driver call sequence are NIC init, configure,
 		 *  rx/tx_queue_setup and dev_start. In rx/tx_queue_setup() function, it
 		 *  will try to lookup the VSI that specific queue belongs to if VMDQ
@@ -1891,7 +1969,7 @@ err:
 void
 i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
 {
-	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
+	struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
@@ -2004,10 +2082,10 @@ __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
 	I40E_WRITE_FLUSH(hw);
 }

-void
+int
 i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
 {
-	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
+	struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
@@ -2024,10 +2102,14 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)

 	/* VF bind interrupt */
 	if (vsi->type == I40E_VSI_SRIOV) {
+		if (vsi->nb_msix == 0) {
+			PMD_DRV_LOG(ERR, "No msix resource");
+			return -EINVAL;
+		}
 		__vsi_queues_bind_intr(vsi, msix_vect,
 				       vsi->base_queue, vsi->nb_qps,
 				       itr_idx);
-		return;
+		return 0;
 	}

 	/* PF & VMDq bind interrupt */
@@ -2044,7 +2126,10 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
 	}

 	for (i = 0; i < vsi->nb_used_qps; i++) {
-		if (nb_msix <= 1) {
+		if (vsi->nb_msix == 0) {
+			PMD_DRV_LOG(ERR, "No msix resource");
+			return -EINVAL;
+		} else if (nb_msix <= 1) {
 			if (!rte_intr_allow_others(intr_handle))
 				/* allow to share MISC_VEC_ID */
 				msix_vect = I40E_MISC_VEC_ID;
@@ -2069,12 +2154,14 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
 		msix_vect++;
 		nb_msix--;
 	}
+
+	return 0;
 }

-static void
+void
 i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
 {
-	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
+	struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
@@ -2098,10 +2185,10 @@ i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
 	I40E_WRITE_FLUSH(hw);
 }

-static void
+void
 i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
 {
-	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
+	struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
@@ -2214,7 +2301,8 @@ i40e_phy_conf_link(struct i40e_hw *hw,
 	phy_conf.phy_type = is_up ? cpu_to_le32(phy_type_mask) : 0;
 	phy_conf.phy_type_ext = is_up ? (I40E_AQ_PHY_TYPE_EXT_25G_KR |
 		I40E_AQ_PHY_TYPE_EXT_25G_CR | I40E_AQ_PHY_TYPE_EXT_25G_SR |
-		I40E_AQ_PHY_TYPE_EXT_25G_LR) : 0;
+		I40E_AQ_PHY_TYPE_EXT_25G_LR | I40E_AQ_PHY_TYPE_EXT_25G_AOC |
+		I40E_AQ_PHY_TYPE_EXT_25G_ACC) : 0;
 	phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info;
 	phy_conf.eee_capability = phy_ab.eee_capability;
 	phy_conf.eeer = phy_ab.eeer_val;
@@ -2240,6 +2328,9 @@ i40e_apply_link_speed(struct rte_eth_dev *dev)
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct rte_eth_conf *conf = &dev->data->dev_conf;

+	abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK |
+		     I40E_AQ_PHY_LINK_ENABLED;
+
 	if (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
 		conf->link_speeds = ETH_LINK_SPEED_40G |
 				    ETH_LINK_SPEED_25G |
@@ -2247,11 +2338,12 @@ i40e_apply_link_speed(struct rte_eth_dev *dev)
 				    ETH_LINK_SPEED_10G |
 				    ETH_LINK_SPEED_1G |
 				    ETH_LINK_SPEED_100M;
+
+		abilities |= I40E_AQ_PHY_AN_ENABLED;
+	} else {
+		abilities &= ~I40E_AQ_PHY_AN_ENABLED;
 	}
 	speed = i40e_parse_link_speeds(conf->link_speeds);
-	abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK |
-		     I40E_AQ_PHY_AN_ENABLED |
-		     I40E_AQ_PHY_LINK_ENABLED;

 	return i40e_phy_conf_link(hw, abilities, speed, true);
 }
@@ -2267,16 +2359,10 @@ i40e_dev_start(struct rte_eth_dev *dev)
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	uint32_t intr_vector = 0;
 	struct i40e_vsi *vsi;
+	uint16_t nb_rxq, nb_txq;

 	hw->adapter_stopped = 0;

-	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
-		PMD_INIT_LOG(ERR,
-		"Invalid link_speeds for port %u, autonegotiation disabled",
-			    dev->data->port_id);
-		return -EINVAL;
-	}
-
 	rte_intr_disable(intr_handle);

 	if ((rte_intr_cap_multiple(intr_handle) ||
@@ -2305,35 +2391,38 @@ i40e_dev_start(struct rte_eth_dev *dev)
 	ret = i40e_dev_rxtx_init(pf);
 	if (ret != I40E_SUCCESS) {
 		PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
-		goto err_up;
+		return ret;
 	}

 	/* Map queues with MSIX interrupt */
 	main_vsi->nb_used_qps = dev->data->nb_rx_queues -
 		pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
-	i40e_vsi_queues_bind_intr(main_vsi, I40E_ITR_INDEX_DEFAULT);
+	ret = i40e_vsi_queues_bind_intr(main_vsi, I40E_ITR_INDEX_DEFAULT);
+	if (ret < 0)
+		return ret;
 	i40e_vsi_enable_queues_intr(main_vsi);

 	/* Map VMDQ VSI queues with MSIX interrupt */
 	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
 		pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
-		i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi,
-					  I40E_ITR_INDEX_DEFAULT);
+		ret = i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi,
+						I40E_ITR_INDEX_DEFAULT);
+		if (ret < 0)
+			return ret;
 		i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
 	}

-	/* enable FDIR MSIX interrupt */
-	if (pf->fdir.fdir_vsi) {
-		i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi,
-					  I40E_ITR_INDEX_NONE);
-		i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
+	/* Enable all queues which have been configured */
+	for (nb_rxq = 0; nb_rxq < dev->data->nb_rx_queues; nb_rxq++) {
+		ret = i40e_dev_rx_queue_start(dev, nb_rxq);
+		if (ret)
+			goto rx_err;
 	}

-	/* Enable all queues which have been configured */
-	ret = i40e_dev_switch_queues(pf, TRUE);
-	if (ret != I40E_SUCCESS) {
-		PMD_DRV_LOG(ERR, "Failed to enable VSI");
-		goto err_up;
+	for (nb_txq = 0; nb_txq < dev->data->nb_tx_queues; nb_txq++) {
+		ret = i40e_dev_tx_queue_start(dev, nb_txq);
+		if (ret)
+			goto tx_err;
 	}

 	/* Enable receiving broadcast packets */
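dev_start now brings queues up one at a time and, on failure, unwinds only the queues that actually started (the rx_err/tx_err labels further down in this diff). The same rollback pattern in isolation, with hypothetical start/stop callbacks standing in for the per-queue driver hooks:

#include <stdio.h>

/* stand-ins for i40e_dev_rx/tx_queue_start/stop; queue 3 fails for the demo */
static int queue_start(int q) { return q == 3 ? -1 : 0; }
static void queue_stop(int q) { printf("stopped %d\n", q); }

static int bring_up(int nb_q)
{
	int q;

	for (q = 0; q < nb_q; q++) {
		if (queue_start(q) != 0)
			goto err;	/* queues 0..q-1 are running */
	}
	return 0;
err:
	while (q-- > 0)		/* unwind only what was started */
		queue_stop(q);
	return -1;
}

int main(void)
{
	bring_up(5);	/* starts 0..2, fails at 3, stops 2,1,0 */
	return 0;
}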
@@ -2363,7 +2452,7 @@ i40e_dev_start(struct rte_eth_dev *dev)
 		ret = i40e_aq_set_lb_modes(hw, dev->data->dev_conf.lpbk_mode, NULL);
 		if (ret != I40E_SUCCESS) {
 			PMD_DRV_LOG(ERR, "fail to set loopback link");
-			goto err_up;
+			goto tx_err;
 		}
 	}

@@ -2371,7 +2460,7 @@ i40e_dev_start(struct rte_eth_dev *dev)
 	ret = i40e_apply_link_speed(dev);
 	if (I40E_SUCCESS != ret) {
 		PMD_DRV_LOG(ERR, "Fail to apply link setting");
-		goto err_up;
+		goto tx_err;
 	}

 	if (!rte_intr_allow_others(intr_handle)) {
@@ -2414,14 +2503,17 @@ i40e_dev_start(struct rte_eth_dev *dev)

 	return I40E_SUCCESS;

-err_up:
-	i40e_dev_switch_queues(pf, FALSE);
-	i40e_dev_clear_queues(dev);
+tx_err:
+	for (i = 0; i < nb_txq; i++)
+		i40e_dev_tx_queue_stop(dev, i);
+rx_err:
+	for (i = 0; i < nb_rxq; i++)
+		i40e_dev_rx_queue_stop(dev, i);

 	return ret;
 }

-static void
+static int
 i40e_dev_stop(struct rte_eth_dev *dev)
 {
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
@@ -2432,7 +2524,7 @@ i40e_dev_stop(struct rte_eth_dev *dev)
 	int i;

 	if (hw->adapter_stopped == 1)
-		return;
+		return 0;

 	if (dev->data->dev_conf.intr_conf.rxq == 0) {
 		rte_eal_alarm_cancel(i40e_dev_alarm_handler, dev);
@@ -2440,7 +2532,11 @@ i40e_dev_stop(struct rte_eth_dev *dev)
 	}

 	/* Disable all queues */
-	i40e_dev_switch_queues(pf, FALSE);
+	for (i = 0; i < dev->data->nb_tx_queues; i++)
+		i40e_dev_tx_queue_stop(dev, i);
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++)
+		i40e_dev_rx_queue_stop(dev, i);

 	/* un-map queues with interrupt registers */
 	i40e_vsi_disable_queues_intr(main_vsi);
@@ -2451,10 +2547,6 @@ i40e_dev_stop(struct rte_eth_dev *dev)
 		i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
 	}

-	if (pf->fdir.fdir_vsi) {
-		i40e_vsi_queues_unbind_intr(pf->fdir.fdir_vsi);
-		i40e_vsi_disable_queues_intr(pf->fdir.fdir_vsi);
-	}
 	/* Clear all queues and release memory */
 	i40e_dev_clear_queues(dev);

@@ -2478,18 +2570,20 @@ i40e_dev_stop(struct rte_eth_dev *dev)
 	pf->tm_conf.committed = false;

 	hw->adapter_stopped = 1;
+	dev->data->dev_started = 0;

 	pf->adapter->rss_reta_updated = 0;
+
+	return 0;
 }

-static void
+static int
 i40e_dev_close(struct rte_eth_dev *dev)
 {
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
-	struct i40e_mirror_rule *p_mirror;
 	struct i40e_filter_control_settings settings;
 	struct rte_flow *p_flow;
 	uint32_t reg;
@@ -2499,32 +2593,15 @@ i40e_dev_close(struct rte_eth_dev *dev)
 	int retries = 0;

 	PMD_INIT_FUNC_TRACE();
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;

 	ret = rte_eth_switch_domain_free(pf->switch_domain_id);
 	if (ret)
 		PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);

-	i40e_dev_stop(dev);
-
-	/* Remove all mirror rules */
-	while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
-		ret = i40e_aq_del_mirror_rule(hw,
-					      pf->main_vsi->veb->seid,
-					      p_mirror->rule_type,
-					      p_mirror->entries,
-					      p_mirror->num_entries,
-					      p_mirror->id);
-		if (ret < 0)
-			PMD_DRV_LOG(ERR, "failed to remove mirror rule: "
-				    "status = %d, aq_err = %d.", ret,
-				    hw->aq.asq_last_status);
-
-		/* remove mirror software resource anyway */
-		TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
-		rte_free(p_mirror);
-		pf->nb_mirror_rule--;
-	}
+	ret = i40e_dev_stop(dev);

 	i40e_dev_free_queues(dev);

@@ -2569,10 +2646,6 @@ i40e_dev_close(struct rte_eth_dev *dev)
 			(reg | I40E_PFGEN_CTRL_PFSWR_MASK));
 	I40E_WRITE_FLUSH(hw);

-	dev->dev_ops = NULL;
-	dev->rx_pkt_burst = NULL;
-	dev->tx_pkt_burst = NULL;
-
 	/* Clear PXE mode */
 	i40e_clear_pxe_mode(hw);
@@ -2593,7 +2666,7 @@ i40e_dev_close(struct rte_eth_dev *dev)
 	do {
 		ret = rte_intr_callback_unregister(intr_handle,
 						   i40e_dev_interrupt_handler, dev);
-		if (ret >= 0) {
+		if (ret >= 0 || ret == -ENOENT) {
 			break;
 		} else if (ret != -EAGAIN) {
 			PMD_INIT_LOG(ERR,
@@ -2610,13 +2683,21 @@ i40e_dev_close(struct rte_eth_dev *dev)
 	/* Remove all flows */
 	while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
 		TAILQ_REMOVE(&pf->flow_list, p_flow, node);
-		rte_free(p_flow);
+		/* Do not free FDIR flows since they are static allocated */
+		if (p_flow->filter_type != RTE_ETH_FILTER_FDIR)
+			rte_free(p_flow);
 	}

+	/* release the fdir static allocated memory */
+	i40e_fdir_memory_cleanup(pf);
+
 	/* Remove all Traffic Manager configuration */
 	i40e_tm_conf_uninit(dev);

+	i40e_clear_automask(pf);
+
 	hw->adapter_closed = 1;
+	return ret;
 }

 /*
@@ -2883,7 +2964,10 @@ update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
 		link->link_speed = ETH_SPEED_NUM_40G;
 		break;
 	default:
-		link->link_speed = ETH_SPEED_NUM_NONE;
+		if (link->link_status)
+			link->link_speed = ETH_SPEED_NUM_UNKNOWN;
+		else
+			link->link_speed = ETH_SPEED_NUM_NONE;
 		break;
 	}
 }
@@ -2918,6 +3002,21 @@ i40e_dev_link_update(struct rte_eth_dev *dev,
 	return ret;
 }

+static void
+i40e_stat_update_48_in_64(struct i40e_hw *hw, uint32_t hireg,
+			  uint32_t loreg, bool offset_loaded, uint64_t *offset,
+			  uint64_t *stat, uint64_t *prev_stat)
+{
+	i40e_stat_update_48(hw, hireg, loreg, offset_loaded, offset, stat);
+	/* enlarge the limitation when statistics counters overflowed */
+	if (offset_loaded) {
+		if (I40E_RXTX_BYTES_L_48_BIT(*prev_stat) > *stat)
+			*stat += (uint64_t)1 << I40E_48_BIT_WIDTH;
+		*stat += I40E_RXTX_BYTES_H_16_BIT(*prev_stat);
+	}
+	*prev_stat = *stat;
+}
+
 /* Get all the statistics of a VSI */
 void
 i40e_update_vsi_stats(struct i40e_vsi *vsi)
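i40e_stat_update_48_in_64 widens the hardware's 48-bit byte counters to 64 bits in software: when the fresh 48-bit reading is below the low 48 bits of the previous snapshot the counter must have wrapped, so one extra 2^48 is added, and the high 16 bits accumulated in the previous snapshot are re-applied. The same arithmetic in isolation, with assumed helper macros rather than the driver's:

#include <stdint.h>
#include <stdio.h>

#define BITS48		48
#define LOW48(x)	((x) & ((1ULL << BITS48) - 1))
#define HIGH16(x)	((x) & ~((1ULL << BITS48) - 1))

/* stat: freshly read 48-bit hardware value; prev: last widened 64-bit result */
static uint64_t widen_48_to_64(uint64_t stat, uint64_t *prev)
{
	if (LOW48(*prev) > stat)	/* 48-bit counter wrapped since last read */
		stat += 1ULL << BITS48;
	stat += HIGH16(*prev);		/* carry the accumulated high bits */
	*prev = stat;
	return stat;
}

int main(void)
{
	uint64_t prev = 0;
	widen_48_to_64(0xFFFFFFFFFF00ULL, &prev);	/* near the 48-bit limit */
	/* next sample is tiny, so a wrap is detected: prints 0x1000000000010 */
	printf("0x%llx\n", (unsigned long long)widen_48_to_64(0x10ULL, &prev));
	return 0;
}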
@@ -2927,9 +3026,9 @@ i40e_update_vsi_stats(struct i40e_vsi *vsi)
 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
 	int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);

-	i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
-			    vsi->offset_loaded, &oes->rx_bytes,
-			    &nes->rx_bytes);
+	i40e_stat_update_48_in_64(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
+				  vsi->offset_loaded, &oes->rx_bytes,
+				  &nes->rx_bytes, &vsi->prev_rx_bytes);
 	i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
 			    vsi->offset_loaded, &oes->rx_unicast,
 			    &nes->rx_unicast);
@@ -2950,9 +3049,9 @@ i40e_update_vsi_stats(struct i40e_vsi *vsi)
 	i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
 			    &oes->rx_unknown_protocol,
 			    &nes->rx_unknown_protocol);
-	i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
-			    vsi->offset_loaded, &oes->tx_bytes,
-			    &nes->tx_bytes);
+	i40e_stat_update_48_in_64(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
+				  vsi->offset_loaded, &oes->tx_bytes,
+				  &nes->tx_bytes, &vsi->prev_tx_bytes);
 	i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
 			    vsi->offset_loaded, &oes->tx_unicast,
 			    &nes->tx_unicast);
@@ -2994,17 +3093,18 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
 	struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */

 	/* Get rx/tx bytes of internal transfer packets */
-	i40e_stat_update_48(hw, I40E_GLV_GORCH(hw->port),
-			I40E_GLV_GORCL(hw->port),
-			pf->offset_loaded,
-			&pf->internal_stats_offset.rx_bytes,
-			&pf->internal_stats.rx_bytes);
-
-	i40e_stat_update_48(hw, I40E_GLV_GOTCH(hw->port),
-			I40E_GLV_GOTCL(hw->port),
-			pf->offset_loaded,
-			&pf->internal_stats_offset.tx_bytes,
-			&pf->internal_stats.tx_bytes);
+	i40e_stat_update_48_in_64(hw, I40E_GLV_GORCH(hw->port),
+				  I40E_GLV_GORCL(hw->port),
+				  pf->offset_loaded,
+				  &pf->internal_stats_offset.rx_bytes,
+				  &pf->internal_stats.rx_bytes,
+				  &pf->internal_prev_rx_bytes);
+	i40e_stat_update_48_in_64(hw, I40E_GLV_GOTCH(hw->port),
+				  I40E_GLV_GOTCL(hw->port),
+				  pf->offset_loaded,
+				  &pf->internal_stats_offset.tx_bytes,
+				  &pf->internal_stats.tx_bytes,
+				  &pf->internal_prev_tx_bytes);
 	/* Get total internal rx packet count */
 	i40e_stat_update_48(hw, I40E_GLV_UPRCH(hw->port),
 			    I40E_GLV_UPRCL(hw->port),
@@ -3044,10 +3144,10 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
 			pf->internal_stats.rx_broadcast) * RTE_ETHER_CRC_LEN;

 	/* Get statistics of struct i40e_eth_stats */
-	i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
-			    I40E_GLPRT_GORCL(hw->port),
-			    pf->offset_loaded, &os->eth.rx_bytes,
-			    &ns->eth.rx_bytes);
+	i40e_stat_update_48_in_64(hw, I40E_GLPRT_GORCH(hw->port),
+				  I40E_GLPRT_GORCL(hw->port),
+				  pf->offset_loaded, &os->eth.rx_bytes,
+				  &ns->eth.rx_bytes, &pf->prev_rx_bytes);
 	i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
 			    I40E_GLPRT_UPRCL(hw->port),
 			    pf->offset_loaded, &os->eth.rx_unicast,
@@ -3102,10 +3202,10 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
 			    pf->offset_loaded,
 			    &os->eth.rx_unknown_protocol,
 			    &ns->eth.rx_unknown_protocol);
-	i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
-			    I40E_GLPRT_GOTCL(hw->port),
-			    pf->offset_loaded, &os->eth.tx_bytes,
-			    &ns->eth.tx_bytes);
+	i40e_stat_update_48_in_64(hw, I40E_GLPRT_GOTCH(hw->port),
+				  I40E_GLPRT_GOTCL(hw->port),
+				  pf->offset_loaded, &os->eth.tx_bytes,
+				  &ns->eth.tx_bytes, &pf->prev_tx_bytes);
 	i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
 			    I40E_GLPRT_UPTCL(hw->port),
 			    pf->offset_loaded, &os->eth.tx_unicast,
@@ -3561,9 +3661,11 @@ i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
 		 ((hw->nvm.version >> 4) & 0xff),
 		 (hw->nvm.version & 0xf), hw->nvm.eetrack,
 		 ver, build, patch);
+	if (ret < 0)
+		return -EINVAL;

 	ret += 1; /* add the size of '\0' */
-	if (fw_size < (u32)ret)
+	if (fw_size < (size_t)ret)
 		return ret;
 	else
 		return 0;
@@ -3850,6 +3952,39 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
 	return ret;
 }

+/* Configure outer vlan stripping on or off in QinQ mode */
+static int
+i40e_vsi_config_outer_vlan_stripping(struct i40e_vsi *vsi, bool on)
+{
+	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+	int ret = I40E_SUCCESS;
+	uint32_t reg;
+
+	if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
+		PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
+		return -EINVAL;
+	}
+
+	/* Configure for outer VLAN RX stripping */
+	reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
+
+	if (on)
+		reg |= I40E_VSI_TSR_QINQ_STRIP;
+	else
+		reg &= ~I40E_VSI_TSR_QINQ_STRIP;
+
+	ret = i40e_aq_debug_write_register(hw,
+					   I40E_VSI_TSR(vsi->vsi_id),
+					   reg, NULL);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
+			    vsi->vsi_id);
+		return I40E_ERR_CONFIG;
+	}
+
+	return ret;
+}
+
 static int
 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
@@ -3857,11 +3992,6 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct i40e_vsi *vsi = pf->main_vsi;
 	struct rte_eth_rxmode *rxmode;

-	if (mask & ETH_QINQ_STRIP_MASK) {
-		PMD_DRV_LOG(ERR, "Strip qinq is not supported.");
-		return -ENOTSUP;
-	}
-
 	rxmode = &dev->data->dev_conf.rxmode;
 	if (mask & ETH_VLAN_FILTER_MASK) {
 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
@@ -3891,6 +4021,14 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 			i40e_vsi_config_double_vlan(vsi, FALSE);
 	}

+	if (mask & ETH_QINQ_STRIP_MASK) {
+		/* Enable or disable outer VLAN stripping */
+		if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
+			i40e_vsi_config_outer_vlan_stripping(vsi, TRUE);
+		else
+			i40e_vsi_config_outer_vlan_stripping(vsi, FALSE);
+	}
+
 	return 0;
 }

@@ -4145,9 +4283,9 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
 	rte_memcpy(&mac_filter.mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);

 	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
-		mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
+		mac_filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
 	else
-		mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
+		mac_filter.filter_type = I40E_MAC_PERFECT_MATCH;

 	if (pool == 0)
 		vsi = pf->main_vsi;
@@ -4202,152 +4340,39 @@ i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
 	}
 }

-/* Set perfect match or hash match of MAC and VLAN for a VF */
 static int
-i40e_vf_mac_filter_set(struct i40e_pf *pf,
-		       struct rte_eth_mac_filter *filter,
-		       bool add)
+i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
 {
-	struct i40e_hw *hw;
-	struct i40e_mac_filter_info mac_filter;
-	struct rte_ether_addr old_mac;
-	struct rte_ether_addr *new_mac;
-	struct i40e_pf_vf *vf = NULL;
-	uint16_t vf_id;
+	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
+	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+	uint32_t reg;
 	int ret;

-	if (pf == NULL) {
-		PMD_DRV_LOG(ERR, "Invalid PF argument.");
-		return -EINVAL;
-	}
-	hw = I40E_PF_TO_HW(pf);
-
-	if (filter == NULL) {
-		PMD_DRV_LOG(ERR, "Invalid mac filter argument.");
+	if (!lut)
 		return -EINVAL;
-	}
-
-	new_mac = &filter->mac_addr;

+	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
+		ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id,
+					  vsi->type != I40E_VSI_SRIOV,
+					  lut, lut_size);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
+			return ret;
+		}
+	} else {
+		uint32_t *lut_dw = (uint32_t *)lut;
+		uint16_t i, lut_size_dw = lut_size / 4;

-	if (rte_is_zero_ether_addr(new_mac)) {
-		PMD_DRV_LOG(ERR, "Invalid ethernet address.");
-		return -EINVAL;
-	}
-
-	vf_id = filter->dst_id;
-
-	if (vf_id > pf->vf_num - 1 || !pf->vfs) {
-		PMD_DRV_LOG(ERR, "Invalid argument.");
-		return -EINVAL;
-	}
-	vf = &pf->vfs[vf_id];
-
-	if (add && rte_is_same_ether_addr(new_mac, &pf->dev_addr)) {
-		PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address.");
-		return -EINVAL;
-	}
-
-	if (add) {
-		rte_memcpy(&old_mac, hw->mac.addr, RTE_ETHER_ADDR_LEN);
-		rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
-				RTE_ETHER_ADDR_LEN);
-		rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
-				 RTE_ETHER_ADDR_LEN);
-
-		mac_filter.filter_type = filter->filter_type;
-		ret = i40e_vsi_add_mac(vf->vsi, &mac_filter);
-		if (ret != I40E_SUCCESS) {
-			PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
-			return -1;
-		}
-		rte_ether_addr_copy(new_mac, &pf->dev_addr);
-	} else {
-		rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
-				RTE_ETHER_ADDR_LEN);
-		ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);
-		if (ret != I40E_SUCCESS) {
-			PMD_DRV_LOG(ERR, "Failed to delete MAC filter.");
-			return -1;
-		}
-
-		/* Clear device address as it has been removed */
-		if (rte_is_same_ether_addr(&pf->dev_addr, new_mac))
-			memset(&pf->dev_addr, 0, sizeof(struct rte_ether_addr));
-	}
-
-	return 0;
-}
-
-/* MAC filter handle */
-static int
-i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
-		void *arg)
-{
-	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-	struct rte_eth_mac_filter *filter;
-	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
-	int ret = I40E_NOT_SUPPORTED;
-
-	filter = (struct rte_eth_mac_filter *)(arg);
-
-	switch (filter_op) {
-	case RTE_ETH_FILTER_NOP:
-		ret = I40E_SUCCESS;
-		break;
-	case RTE_ETH_FILTER_ADD:
-		i40e_pf_disable_irq0(hw);
-		if (filter->is_vf)
-			ret = i40e_vf_mac_filter_set(pf, filter, 1);
-		i40e_pf_enable_irq0(hw);
-		break;
-	case RTE_ETH_FILTER_DELETE:
-		i40e_pf_disable_irq0(hw);
-		if (filter->is_vf)
-			ret = i40e_vf_mac_filter_set(pf, filter, 0);
-		i40e_pf_enable_irq0(hw);
-		break;
-	default:
-		PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
-		ret = I40E_ERR_PARAM;
-		break;
-	}
-
-	return ret;
-}
-
-static int
-i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
-{
-	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
-	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
-	uint32_t reg;
-	int ret;
-
-	if (!lut)
-		return -EINVAL;
-
-	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
-		ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id,
-					  vsi->type != I40E_VSI_SRIOV,
-					  lut, lut_size);
-		if (ret) {
-			PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
-			return ret;
-		}
-	} else {
-		uint32_t *lut_dw = (uint32_t *)lut;
-		uint16_t i, lut_size_dw = lut_size / 4;
-
-		if (vsi->type == I40E_VSI_SRIOV) {
-			for (i = 0; i <= lut_size_dw; i++) {
-				reg = I40E_VFQF_HLUT1(i, vsi->user_param);
-				lut_dw[i] = i40e_read_rx_ctl(hw, reg);
-			}
-		} else {
-			for (i = 0; i < lut_size_dw; i++)
-				lut_dw[i] = I40E_READ_REG(hw,
-							  I40E_PFQF_HLUT(i));
-		}
+		if (vsi->type == I40E_VSI_SRIOV) {
+			for (i = 0; i <= lut_size_dw; i++) {
+				reg = I40E_VFQF_HLUT1(i, vsi->user_param);
+				lut_dw[i] = i40e_read_rx_ctl(hw, reg);
+			}
+		} else {
+			for (i = 0; i < lut_size_dw; i++)
+				lut_dw[i] = I40E_READ_REG(hw,
+							  I40E_PFQF_HLUT(i));
+		}
 	}

 	return 0;
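When the firmware AQ path is unavailable, the RSS LUT above is accessed four entries at a time: the byte array is reinterpreted as uint32_t words, one word per HLUT register. A sketch of that byte/word handling, with a hypothetical register array in place of I40E_PFQF_HLUT and memcpy used to sidestep strict-aliasing concerns:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define LUT_SIZE 128	/* bytes; one queue index per byte */

static uint32_t hlut_regs[LUT_SIZE / 4];	/* stand-in register file */

static void lut_write(const uint8_t *lut, uint16_t lut_size)
{
	uint32_t words[LUT_SIZE / 4];
	uint16_t i, lut_size_dw = lut_size / 4;

	memcpy(words, lut, lut_size);	/* pack 4 bytes per register word */
	for (i = 0; i < lut_size_dw; i++)
		hlut_regs[i] = words[i];	/* one 32-bit register write */
}

int main(void)
{
	uint8_t lut[LUT_SIZE];
	for (int i = 0; i < LUT_SIZE; i++)
		lut[i] = i % 4;	/* spread flows over 4 queues */
	lut_write(lut, LUT_SIZE);
	printf("reg0=0x%08x\n", hlut_regs[0]);	/* 0x03020100 on little endian */
	return 0;
}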
@@ -4358,7 +4383,6 @@ i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
 {
 	struct i40e_pf *pf;
 	struct i40e_hw *hw;
-	int ret;

 	if (!vsi || !lut)
 		return -EINVAL;
@@ -4367,12 +4391,16 @@ i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
 	hw = I40E_VSI_TO_HW(vsi);

 	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
-		ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id,
-					  vsi->type != I40E_VSI_SRIOV,
-					  lut, lut_size);
-		if (ret) {
-			PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
-			return ret;
+		enum i40e_status_code status;
+
+		status = i40e_aq_set_rss_lut(hw, vsi->vsi_id,
+					     vsi->type != I40E_VSI_SRIOV,
+					     lut, lut_size);
+		if (status) {
+			PMD_DRV_LOG(ERR,
+				    "Failed to update RSS lookup table, error status: %d",
+				    status);
+			return -EIO;
 		}
 	} else {
 		uint32_t *lut_dw = (uint32_t *)lut;
@@ -4487,18 +4515,20 @@ out:
  * @alignment: what to align the allocation to
 **/
 enum i40e_status_code
-i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
+i40e_allocate_dma_mem_d(__rte_unused struct i40e_hw *hw,
			struct i40e_dma_mem *mem,
			u64 size,
			u32 alignment)
 {
+	static uint64_t i40e_dma_memzone_id;
 	const struct rte_memzone *mz = NULL;
 	char z_name[RTE_MEMZONE_NAMESIZE];

 	if (!mem)
 		return I40E_ERR_PARAM;

-	snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand());
+	snprintf(z_name, sizeof(z_name), "i40e_dma_%" PRIu64,
+		__atomic_fetch_add(&i40e_dma_memzone_id, 1, __ATOMIC_RELAXED));
 	mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
 			RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M);
 	if (!mz)
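DMA memzone names must be unique, and the diff replaces the collision-prone rte_rand() name with a monotonically increasing atomic counter. The naming scheme in isolation, using C11 atomics as a stand-in for the GCC __atomic builtin used above:

#include <inttypes.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t dma_memzone_id;

/* produces "i40e_dma_0", "i40e_dma_1", ... - a value is never handed out twice */
static void next_zone_name(char *buf, size_t len)
{
	uint64_t id = atomic_fetch_add_explicit(&dma_memzone_id, 1,
						memory_order_relaxed);
	snprintf(buf, len, "i40e_dma_%" PRIu64, id);
}

int main(void)
{
	char name[32];
	next_zone_name(name, sizeof(name));
	puts(name);	/* i40e_dma_0 */
	next_zone_name(name, sizeof(name));
	puts(name);	/* i40e_dma_1 */
	return 0;
}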
@@ -4521,7 +4551,7 @@ i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
  * @mem: ptr to mem struct to free
 **/
 enum i40e_status_code
-i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
+i40e_free_dma_mem_d(__rte_unused struct i40e_hw *hw,
		    struct i40e_dma_mem *mem)
 {
 	if (!mem)
@@ -4545,7 +4575,7 @@ i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
  * @size: size of memory requested
 **/
 enum i40e_status_code
-i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
+i40e_allocate_virt_mem_d(__rte_unused struct i40e_hw *hw,
			 struct i40e_virt_mem *mem,
			 u32 size)
 {
@@ -4567,7 +4597,7 @@ i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
  * @mem: pointer to mem struct to free
 **/
 enum i40e_status_code
-i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
+i40e_free_virt_mem_d(__rte_unused struct i40e_hw *hw,
		     struct i40e_virt_mem *mem)
 {
 	if (!mem)
@@ -4598,7 +4628,7 @@ i40e_release_spinlock_d(struct i40e_spinlock *sp)
 }

 void
-i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
+i40e_destroy_spinlock_d(__rte_unused struct i40e_spinlock *sp)
 {
 	return;
 }
@@ -4929,6 +4959,7 @@ i40e_res_pool_free(struct i40e_res_pool_info *pool,
 {
 	struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
 	uint32_t pool_offset;
+	uint16_t len;
 	int insert;

 	if (pool == NULL) {
@@ -4967,12 +4998,13 @@ i40e_res_pool_free(struct i40e_res_pool_info *pool,
 	}

 	insert = 0;
+	len = valid_entry->len;
 	/* Try to merge with next one*/
 	if (next != NULL) {
 		/* Merge with next one */
-		if (valid_entry->base + valid_entry->len == next->base) {
+		if (valid_entry->base + len == next->base) {
 			next->base = valid_entry->base;
-			next->len += valid_entry->len;
+			next->len += len;
 			rte_free(valid_entry);
 			valid_entry = next;
 			insert = 1;
@@ -4982,13 +5014,15 @@ i40e_res_pool_free(struct i40e_res_pool_info *pool,
 	if (prev != NULL) {
 		/* Merge with previous one */
 		if (prev->base + prev->len == valid_entry->base) {
-			prev->len += valid_entry->len;
+			prev->len += len;
 			/* If it merge with next one, remove next node */
 			if (insert == 1) {
 				LIST_REMOVE(valid_entry, next);
 				rte_free(valid_entry);
+				valid_entry = NULL;
 			} else {
 				rte_free(valid_entry);
+				valid_entry = NULL;
 				insert = 1;
 			}
 		}
@@ -5004,8 +5038,8 @@ i40e_res_pool_free(struct i40e_res_pool_info *pool,
 		LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
 	}

-	pool->num_free += valid_entry->len;
-	pool->num_alloc -= valid_entry->len;
+	pool->num_free += len;
+	pool->num_alloc -= len;

 	return 0;
 }
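The res_pool fix above caches valid_entry->len before the block can be merged and freed, then uses the cached value to adjust the pool counters; reading valid_entry->len after the merge would be a use-after-free. A reduced sketch of the hazard and the fix, with a simplified two-node merge and hypothetical types:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct pool_entry { uint32_t base, len; };

/* merge 'e' into 'next' when adjacent; returns the surviving node */
static struct pool_entry *merge_free(struct pool_entry *e,
				     struct pool_entry *next,
				     uint32_t *num_free)
{
	uint32_t len = e->len;	/* cache before 'e' may be freed */

	if (e->base + len == next->base) {
		next->base = e->base;
		next->len += len;
		free(e);	/* 'e' is gone; e->len would now be a UAF read */
		e = next;
	}
	*num_free += len;	/* safe: uses the cached value */
	return e;
}

int main(void)
{
	struct pool_entry *a = malloc(sizeof(*a));
	struct pool_entry b = { 8, 8 };
	uint32_t num_free = 0;

	a->base = 0;
	a->len = 8;
	merge_free(a, &b, &num_free);
	printf("free=%u merged=[%u,+%u]\n", num_free, b.base, b.len);
	return 0;
}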
@@ -5368,7 +5402,7 @@ i40e_vsi_release(struct i40e_vsi *vsi)

 	/* VSI has child to attach, release child first */
 	if (vsi->veb) {
-		TAILQ_FOREACH_SAFE(vsi_list, &vsi->veb->head, list, temp) {
+		RTE_TAILQ_FOREACH_SAFE(vsi_list, &vsi->veb->head, list, temp) {
 			if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
 				return -1;
 		}
@@ -5376,7 +5410,8 @@ i40e_vsi_release(struct i40e_vsi *vsi)
 	}

 	if (vsi->floating_veb) {
-		TAILQ_FOREACH_SAFE(vsi_list, &vsi->floating_veb->head, list, temp) {
+		RTE_TAILQ_FOREACH_SAFE(vsi_list, &vsi->floating_veb->head,
+				       list, temp) {
 			if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
 				return -1;
 		}
@@ -5384,7 +5419,7 @@ i40e_vsi_release(struct i40e_vsi *vsi)

 	/* Remove all macvlan filters of the VSI */
 	i40e_vsi_remove_all_macvlan_filter(vsi);
-	TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
+	RTE_TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
 		rte_free(f);

 	if (vsi->type != I40E_VSI_MAIN &&
@@ -5462,7 +5497,7 @@ i40e_update_default_filter_setting(struct i40e_vsi *vsi)
 		mac = &f->mac_info.mac_addr;
 		rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
			ETH_ADDR_LEN);
-		f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH;
+		f->mac_info.filter_type = I40E_MACVLAN_PERFECT_MATCH;
 		TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
 		vsi->mac_num++;

@@ -5470,7 +5505,7 @@ i40e_update_default_filter_setting(struct i40e_vsi *vsi)
 	}
 	rte_memcpy(&filter.mac_addr,
		(struct rte_ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
-	filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
+	filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
 	return i40e_vsi_add_mac(vsi, &filter);
 }

@@ -5709,10 +5744,14 @@ i40e_vsi_setup(struct i40e_pf *pf,
 		ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
 		if (ret < 0) {
 			PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
-			goto fail_queue_alloc;
+			if (type != I40E_VSI_FDIR)
+				goto fail_queue_alloc;
+			vsi->msix_intr = 0;
+			vsi->nb_msix = 0;
+		} else {
+			vsi->msix_intr = ret;
+			vsi->nb_msix = 1;
 		}
-		vsi->msix_intr = ret;
-		vsi->nb_msix = 1;
 	} else {
 		vsi->msix_intr = 0;
 		vsi->nb_msix = 0;
@@ -5932,7 +5971,7 @@ i40e_vsi_setup(struct i40e_pf *pf,

 	/* MAC/VLAN configuration */
 	rte_memcpy(&filter.mac_addr, &broadcast, RTE_ETHER_ADDR_LEN);
-	filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
+	filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;

 	ret = i40e_vsi_add_mac(vsi, &filter);
 	if (ret != I40E_SUCCESS) {
@@ -5960,15 +5999,15 @@ i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on)
 	struct i40e_mac_filter *f;
 	void *temp;
 	struct i40e_mac_filter_info *mac_filter;
-	enum rte_mac_filter_type desired_filter;
+	enum i40e_mac_filter_type desired_filter;
 	int ret = I40E_SUCCESS;

 	if (on) {
 		/* Filter to match MAC and VLAN */
-		desired_filter = RTE_MACVLAN_PERFECT_MATCH;
+		desired_filter = I40E_MACVLAN_PERFECT_MATCH;
 	} else {
 		/* Filter to match only MAC */
-		desired_filter = RTE_MAC_PERFECT_MATCH;
+		desired_filter = I40E_MAC_PERFECT_MATCH;
 	}

 	num = vsi->mac_num;
@@ -5983,7 +6022,7 @@ i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on)
 	i = 0;

 	/* Remove all existing mac */
-	TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
+	RTE_TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
 		mac_filter[i] = f->mac_info;
 		ret = i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
 		if (ret) {
@@ -6061,6 +6100,7 @@ i40e_dev_init_vlan(struct rte_eth_dev *dev)
 	/* Apply vlan offload setting */
 	mask = ETH_VLAN_STRIP_MASK |
+	       ETH_QINQ_STRIP_MASK |
 	       ETH_VLAN_FILTER_MASK |
 	       ETH_VLAN_EXTEND_MASK;
 	ret = i40e_vlan_offload_set(dev, mask);
@@ -6276,33 +6316,6 @@ i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
 	return I40E_SUCCESS;
 }

-/* Swith on or off the tx queues */
-static int
-i40e_dev_switch_tx_queues(struct i40e_pf *pf, bool on)
-{
-	struct rte_eth_dev_data *dev_data = pf->dev_data;
-	struct i40e_tx_queue *txq;
-	struct rte_eth_dev *dev = pf->adapter->eth_dev;
-	uint16_t i;
-	int ret;
-
-	for (i = 0; i < dev_data->nb_tx_queues; i++) {
-		txq = dev_data->tx_queues[i];
-		/* Don't operate the queue if not configured or
-		 * if starting only per queue */
-		if (!txq || !txq->q_set || (on && txq->tx_deferred_start))
-			continue;
-		if (on)
-			ret = i40e_dev_tx_queue_start(dev, i);
-		else
-			ret = i40e_dev_tx_queue_stop(dev, i);
-		if ( ret != I40E_SUCCESS)
-			return ret;
-	}
-
-	return I40E_SUCCESS;
-}
-
 int
 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
 {
@@ -6354,59 +6367,6 @@ i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
 	return I40E_SUCCESS;
 }

-/* Switch on or off the rx queues */
-static int
-i40e_dev_switch_rx_queues(struct i40e_pf *pf, bool on)
-{
-	struct rte_eth_dev_data *dev_data = pf->dev_data;
-	struct i40e_rx_queue *rxq;
-	struct rte_eth_dev *dev = pf->adapter->eth_dev;
-	uint16_t i;
-	int ret;
-
-	for (i = 0; i < dev_data->nb_rx_queues; i++) {
-		rxq = dev_data->rx_queues[i];
-		/* Don't operate the queue if not configured or
-		 * if starting only per queue */
-		if (!rxq || !rxq->q_set || (on && rxq->rx_deferred_start))
-			continue;
-		if (on)
-			ret = i40e_dev_rx_queue_start(dev, i);
-		else
-			ret = i40e_dev_rx_queue_stop(dev, i);
-		if (ret != I40E_SUCCESS)
-			return ret;
-	}
-
-	return I40E_SUCCESS;
-}
-
-/* Switch on or off all the rx/tx queues */
-int
-i40e_dev_switch_queues(struct i40e_pf *pf, bool on)
-{
-	int ret;
-
-	if (on) {
-		/* enable rx queues before enabling tx queues */
-		ret = i40e_dev_switch_rx_queues(pf, on);
-		if (ret) {
-			PMD_DRV_LOG(ERR, "Failed to switch rx queues");
-			return ret;
-		}
-		ret = i40e_dev_switch_tx_queues(pf, on);
-	} else {
-		/* Stop tx queues before stopping rx queues */
-		ret = i40e_dev_switch_tx_queues(pf, on);
-		if (ret) {
-			PMD_DRV_LOG(ERR, "Failed to switch tx queues");
-			return ret;
-		}
-		ret = i40e_dev_switch_rx_queues(pf, on);
-	}
-
-	return ret;
-}

 /* Initialize VSI for TX */
 static int
@@ -6426,8 +6386,7 @@ i40e_dev_tx_init(struct i40e_pf *pf)
 			break;
 	}
 	if (ret == I40E_SUCCESS)
-		i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf)
-				     ->eth_dev);
+		i40e_set_tx_function(&rte_eth_devices[pf->dev_data->port_id]);

 	return ret;
 }
@@ -6441,7 +6400,7 @@ i40e_dev_rx_init(struct i40e_pf *pf)
 	uint16_t i;
 	struct i40e_rx_queue *rxq;

-	i40e_pf_config_mq_rx(pf);
+	i40e_pf_config_rss(pf);
 	for (i = 0; i < data->nb_rx_queues; i++) {
 		rxq = data->rx_queues[i];
 		if (!rxq || !rxq->q_set)
@@ -6455,8 +6414,7 @@ i40e_dev_rx_init(struct i40e_pf *pf)
 		}
 	}
 	if (ret == I40E_SUCCESS)
-		i40e_set_rx_function(container_of(pf, struct i40e_adapter, pf)
-				     ->eth_dev);
+		i40e_set_rx_function(&rte_eth_devices[pf->dev_data->port_id]);

 	return ret;
 }
@@ -6606,9 +6564,13 @@ i40e_stat_update_48(struct i40e_hw *hw,
 {
 	uint64_t new_data;

-	new_data = (uint64_t)I40E_READ_REG(hw, loreg);
-	new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
-			I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
+	if (hw->device_id == I40E_DEV_ID_QEMU) {
+		new_data = (uint64_t)I40E_READ_REG(hw, loreg);
+		new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
+				I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
+	} else {
+		new_data = I40E_READ_REG64(hw, loreg);
+	}

 	if (!offset_loaded)
 		*offset = new_data;
@@ -6747,7 +6709,7 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
 		case i40e_aqc_opc_get_link_status:
 			ret = i40e_dev_link_update(dev, 0);
 			if (!ret)
-				_rte_eth_dev_callback_process(dev,
+				rte_eth_dev_callback_process(dev,
 					RTE_ETH_EVENT_INTR_LSC, NULL);
 			break;
 		default:
@@ -6759,6 +6721,92 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
 	rte_free(info.msg_buf);
 }

+static void
+i40e_handle_mdd_event(struct rte_eth_dev *dev)
+{
+#define I40E_MDD_CLEAR32 0xFFFFFFFF
+#define I40E_MDD_CLEAR16 0xFFFF
+	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	bool mdd_detected = false;
+	struct i40e_pf_vf *vf;
+	uint32_t reg;
+	int i;
+
+	/* find what triggered the MDD event */
+	reg = I40E_READ_REG(hw, I40E_GL_MDET_TX);
+	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
+		uint8_t pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
+				I40E_GL_MDET_TX_PF_NUM_SHIFT;
+		uint16_t vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
+				I40E_GL_MDET_TX_VF_NUM_SHIFT;
+		uint8_t event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
+				I40E_GL_MDET_TX_EVENT_SHIFT;
+		uint16_t queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
+				I40E_GL_MDET_TX_QUEUE_SHIFT) -
+					hw->func_caps.base_queue;
+		PMD_DRV_LOG(WARNING, "Malicious Driver Detection event 0x%02x on TX "
+			"queue %d PF number 0x%02x VF number 0x%02x device %s\n",
+				event, queue, pf_num, vf_num, dev->data->name);
+		I40E_WRITE_REG(hw, I40E_GL_MDET_TX, I40E_MDD_CLEAR32);
+		mdd_detected = true;
+	}
+	reg = I40E_READ_REG(hw, I40E_GL_MDET_RX);
+	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
+		uint8_t func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
+				I40E_GL_MDET_RX_FUNCTION_SHIFT;
+		uint8_t event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
+				I40E_GL_MDET_RX_EVENT_SHIFT;
+		uint16_t queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
+				I40E_GL_MDET_RX_QUEUE_SHIFT) -
+					hw->func_caps.base_queue;
+
+		PMD_DRV_LOG(WARNING, "Malicious Driver Detection event 0x%02x on RX "
+				"queue %d of function 0x%02x device %s\n",
+					event, queue, func, dev->data->name);
+		I40E_WRITE_REG(hw, I40E_GL_MDET_RX, I40E_MDD_CLEAR32);
+		mdd_detected = true;
+	}
+
+	if (mdd_detected) {
+		reg = I40E_READ_REG(hw, I40E_PF_MDET_TX);
+		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
+			I40E_WRITE_REG(hw, I40E_PF_MDET_TX, I40E_MDD_CLEAR16);
+			PMD_DRV_LOG(WARNING, "TX driver issue detected on PF\n");
+		}
+		reg = I40E_READ_REG(hw, I40E_PF_MDET_RX);
+		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
+			I40E_WRITE_REG(hw, I40E_PF_MDET_RX,
+					I40E_MDD_CLEAR16);
+			PMD_DRV_LOG(WARNING, "RX driver issue detected on PF\n");
+		}
+	}
+
+	/* see if one of the VFs needs its hand slapped */
+	for (i = 0; i < pf->vf_num && mdd_detected; i++) {
+		vf = &pf->vfs[i];
+		reg = I40E_READ_REG(hw, I40E_VP_MDET_TX(i));
+		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
+			I40E_WRITE_REG(hw, I40E_VP_MDET_TX(i),
+					I40E_MDD_CLEAR16);
+			vf->num_mdd_events++;
+			PMD_DRV_LOG(WARNING, "TX driver issue detected on VF %d %-"
+					PRIu64 "times\n",
+					i, vf->num_mdd_events);
+		}
+
+		reg = I40E_READ_REG(hw, I40E_VP_MDET_RX(i));
+		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
+			I40E_WRITE_REG(hw, I40E_VP_MDET_RX(i),
+					I40E_MDD_CLEAR16);
+			vf->num_mdd_events++;
+			PMD_DRV_LOG(WARNING, "RX driver issue detected on VF %d %-"
+					PRIu64 "times\n",
+					i, vf->num_mdd_events);
+		}
+	}
+}
+
 /**
  * Interrupt handler triggered by NIC for handling
  * specific interrupt.
@@ -6791,8 +6839,10 @@ i40e_dev_interrupt_handler(void *param) } if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK) PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error"); - if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) + if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) { PMD_DRV_LOG(ERR, "ICR0: malicious programming detected"); + i40e_handle_mdd_event(dev); + } if (icr0 & I40E_PFINT_ICR0_GRST_MASK) PMD_DRV_LOG(INFO, "ICR0: global reset requested"); if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) @@ -6836,8 +6886,10 @@ i40e_dev_alarm_handler(void *param) goto done; if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK) PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error"); - if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) + if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) { PMD_DRV_LOG(ERR, "ICR0: malicious programming detected"); + i40e_handle_mdd_event(dev); + } if (icr0 & I40E_PFINT_ICR0_GRST_MASK) PMD_DRV_LOG(INFO, "ICR0: global reset requested"); if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) @@ -6900,18 +6952,18 @@ i40e_add_macvlan_filters(struct i40e_vsi *vsi, rte_cpu_to_le_16(filter[num + i].vlan_id); switch (filter[num + i].filter_type) { - case RTE_MAC_PERFECT_MATCH: + case I40E_MAC_PERFECT_MATCH: flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH | I40E_AQC_MACVLAN_ADD_IGNORE_VLAN; break; - case RTE_MACVLAN_PERFECT_MATCH: + case I40E_MACVLAN_PERFECT_MATCH: flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH; break; - case RTE_MAC_HASH_MATCH: + case I40E_MAC_HASH_MATCH: flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH | I40E_AQC_MACVLAN_ADD_IGNORE_VLAN; break; - case RTE_MACVLAN_HASH_MATCH: + case I40E_MACVLAN_HASH_MATCH: flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH; break; default: @@ -6975,18 +7027,18 @@ i40e_remove_macvlan_filters(struct i40e_vsi *vsi, rte_cpu_to_le_16(filter[num + i].vlan_id); switch (filter[num + i].filter_type) { - case RTE_MAC_PERFECT_MATCH: + case I40E_MAC_PERFECT_MATCH: flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH | I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; break; - case RTE_MACVLAN_PERFECT_MATCH: + case I40E_MACVLAN_PERFECT_MATCH: flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; break; - case RTE_MAC_HASH_MATCH: + case I40E_MAC_HASH_MATCH: flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH | I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; break; - case RTE_MACVLAN_HASH_MATCH: + case I40E_MACVLAN_HASH_MATCH: flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH; break; default: @@ -7331,8 +7383,8 @@ i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter) f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr); if (f != NULL) return I40E_SUCCESS; - if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) || - (mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) { + if (mac_filter->filter_type == I40E_MACVLAN_PERFECT_MATCH || + mac_filter->filter_type == I40E_MACVLAN_HASH_MATCH) { /** * If vlan_num is 0, that's the first time to add mac, @@ -7343,8 +7395,8 @@ i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter) vsi->vlan_num = 1; } vlan_num = vsi->vlan_num; - } else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) || - (mac_filter->filter_type == RTE_MAC_HASH_MATCH)) + } else if (mac_filter->filter_type == I40E_MAC_PERFECT_MATCH || + mac_filter->filter_type == I40E_MAC_HASH_MATCH) vlan_num = 1; mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0); @@ -7359,8 +7411,8 @@ i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter) ETH_ADDR_LEN); } - if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH || - mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) { + if (mac_filter->filter_type == 
I40E_MACVLAN_PERFECT_MATCH || + mac_filter->filter_type == I40E_MACVLAN_HASH_MATCH) { ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, &mac_filter->mac_addr); if (ret != I40E_SUCCESS) @@ -7397,7 +7449,7 @@ i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct rte_ether_addr *addr) struct i40e_mac_filter *f; struct i40e_macvlan_filter *mv_f; int i, vlan_num; - enum rte_mac_filter_type filter_type; + enum i40e_mac_filter_type filter_type; int ret = I40E_SUCCESS; /* Can't find it, return an error */ @@ -7407,14 +7459,14 @@ i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct rte_ether_addr *addr) vlan_num = vsi->vlan_num; filter_type = f->mac_info.filter_type; - if (filter_type == RTE_MACVLAN_PERFECT_MATCH || - filter_type == RTE_MACVLAN_HASH_MATCH) { + if (filter_type == I40E_MACVLAN_PERFECT_MATCH || + filter_type == I40E_MACVLAN_HASH_MATCH) { if (vlan_num == 0) { PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0"); return I40E_ERR_PARAM; } - } else if (filter_type == RTE_MAC_PERFECT_MATCH || - filter_type == RTE_MAC_HASH_MATCH) + } else if (filter_type == I40E_MAC_PERFECT_MATCH || + filter_type == I40E_MAC_HASH_MATCH) vlan_num = 1; mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0); @@ -7428,8 +7480,8 @@ i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct rte_ether_addr *addr) rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr, ETH_ADDR_LEN); } - if (filter_type == RTE_MACVLAN_PERFECT_MATCH || - filter_type == RTE_MACVLAN_HASH_MATCH) { + if (filter_type == I40E_MACVLAN_PERFECT_MATCH || + filter_type == I40E_MACVLAN_HASH_MATCH) { ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr); if (ret != I40E_SUCCESS) goto DONE; @@ -7486,7 +7538,7 @@ i40e_parse_hena(const struct i40e_adapter *adapter, uint64_t flags) } /* Disable RSS */ -static void +void i40e_pf_disable_rss(struct i40e_pf *pf) { struct i40e_hw *hw = I40E_PF_TO_HW(pf); @@ -7504,7 +7556,6 @@ i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len) uint16_t key_idx = (vsi->type == I40E_VSI_SRIOV) ? 
I40E_VFQF_HKEY_MAX_INDEX : I40E_PFQF_HKEY_MAX_INDEX; - int ret = 0; if (!key || key_len == 0) { PMD_DRV_LOG(DEBUG, "No key to be configured"); @@ -7517,11 +7568,16 @@ i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len) if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) { struct i40e_aqc_get_set_rss_key_data *key_dw = - (struct i40e_aqc_get_set_rss_key_data *)key; + (struct i40e_aqc_get_set_rss_key_data *)key; + enum i40e_status_code status = + i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw); - ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw); - if (ret) - PMD_INIT_LOG(ERR, "Failed to configure RSS key via AQ"); + if (status) { + PMD_DRV_LOG(ERR, + "Failed to configure RSS key via AQ, error status: %d", + status); + return -EIO; + } } else { uint32_t *hash_key = (uint32_t *)key; uint16_t i; @@ -7541,7 +7597,7 @@ i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len) I40E_WRITE_FLUSH(hw); } - return ret; + return 0; } static int @@ -7776,177 +7832,45 @@ i40e_sw_tunnel_filter_del(struct i40e_pf *pf, return 0; } -int -i40e_dev_tunnel_filter_set(struct i40e_pf *pf, - struct rte_eth_tunnel_filter_conf *tunnel_filter, - uint8_t add) +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0 0x48 +#define I40E_TR_VXLAN_GRE_KEY_MASK 0x4 +#define I40E_TR_GENEVE_KEY_MASK 0x8 +#define I40E_TR_GENERIC_UDP_TUNNEL_MASK 0x40 +#define I40E_TR_GRE_KEY_MASK 0x400 +#define I40E_TR_GRE_KEY_WITH_XSUM_MASK 0x800 +#define I40E_TR_GRE_NO_KEY_MASK 0x8000 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_PORT_TR_WORD0 0x49 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_DIRECTION_WORD0 0x41 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_INGRESS_WORD0 0x80 +#define I40E_DIRECTION_INGRESS_KEY 0x8000 +#define I40E_TR_L4_TYPE_TCP 0x2 +#define I40E_TR_L4_TYPE_UDP 0x4 +#define I40E_TR_L4_TYPE_SCTP 0x8 + +static enum +i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf) { - uint16_t ip_type; - uint32_t ipv4_addr, ipv4_addr_le; - uint8_t i, tun_type = 0; - /* internal varialbe to convert ipv6 byte order */ - uint32_t convert_ipv6[4]; - int val, ret = 0; + struct i40e_aqc_replace_cloud_filters_cmd filter_replace; + struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf; struct i40e_hw *hw = I40E_PF_TO_HW(pf); - struct i40e_vsi *vsi = pf->main_vsi; - struct i40e_aqc_cloud_filters_element_bb *cld_filter; - struct i40e_aqc_cloud_filters_element_bb *pfilter; - struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel; - struct i40e_tunnel_filter *tunnel, *node; - struct i40e_tunnel_filter check_filter; /* Check if filter exists */ - - cld_filter = rte_zmalloc("tunnel_filter", - sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext), - 0); - - if (NULL == cld_filter) { - PMD_DRV_LOG(ERR, "Failed to alloc memory."); - return -ENOMEM; - } - pfilter = cld_filter; - - rte_ether_addr_copy(&tunnel_filter->outer_mac, - (struct rte_ether_addr *)&pfilter->element.outer_mac); - rte_ether_addr_copy(&tunnel_filter->inner_mac, - (struct rte_ether_addr *)&pfilter->element.inner_mac); - - pfilter->element.inner_vlan = - rte_cpu_to_le_16(tunnel_filter->inner_vlan); - if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) { - ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4; - ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr); - ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr); - rte_memcpy(&pfilter->element.ipaddr.v4.data, - &ipv4_addr_le, - sizeof(pfilter->element.ipaddr.v4.data)); - } else { - ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6; - for (i = 0; i < 4; i++) { - convert_ipv6[i] = - 
rte_cpu_to_le_32(rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv6_addr[i])); - } - rte_memcpy(&pfilter->element.ipaddr.v6.data, - &convert_ipv6, - sizeof(pfilter->element.ipaddr.v6.data)); - } + struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id]; + enum i40e_status_code status = I40E_SUCCESS; - /* check tunneled type */ - switch (tunnel_filter->tunnel_type) { - case RTE_TUNNEL_TYPE_VXLAN: - tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN; - break; - case RTE_TUNNEL_TYPE_NVGRE: - tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC; - break; - case RTE_TUNNEL_TYPE_IP_IN_GRE: - tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP; - break; - case RTE_TUNNEL_TYPE_VXLAN_GPE: - tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN_GPE; - break; - default: - /* Other tunnel types is not supported. */ - PMD_DRV_LOG(ERR, "tunnel type is not supported."); - rte_free(cld_filter); - return -EINVAL; + if (pf->support_multi_driver) { + PMD_DRV_LOG(ERR, "Replace l1 filter is not supported."); + return I40E_NOT_SUPPORTED; } - val = i40e_dev_get_filter_type(tunnel_filter->filter_type, - &pfilter->element.flags); - if (val < 0) { - rte_free(cld_filter); - return -EINVAL; - } + memset(&filter_replace, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); + memset(&filter_replace_buf, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf)); - pfilter->element.flags |= rte_cpu_to_le_16( - I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE | - ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT)); - pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id); - pfilter->element.queue_number = - rte_cpu_to_le_16(tunnel_filter->queue_id); - - /* Check if there is the filter in SW list */ - memset(&check_filter, 0, sizeof(check_filter)); - i40e_tunnel_filter_convert(cld_filter, &check_filter); - node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input); - if (add && node) { - PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!"); - rte_free(cld_filter); - return -EINVAL; - } - - if (!add && !node) { - PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!"); - rte_free(cld_filter); - return -EINVAL; - } - - if (add) { - ret = i40e_aq_add_cloud_filters(hw, - vsi->seid, &cld_filter->element, 1); - if (ret < 0) { - PMD_DRV_LOG(ERR, "Failed to add a tunnel filter."); - rte_free(cld_filter); - return -ENOTSUP; - } - tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0); - if (tunnel == NULL) { - PMD_DRV_LOG(ERR, "Failed to alloc memory."); - rte_free(cld_filter); - return -ENOMEM; - } - - rte_memcpy(tunnel, &check_filter, sizeof(check_filter)); - ret = i40e_sw_tunnel_filter_insert(pf, tunnel); - if (ret < 0) - rte_free(tunnel); - } else { - ret = i40e_aq_rem_cloud_filters(hw, vsi->seid, - &cld_filter->element, 1); - if (ret < 0) { - PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter."); - rte_free(cld_filter); - return -ENOTSUP; - } - ret = i40e_sw_tunnel_filter_del(pf, &node->input); - } - - rte_free(cld_filter); - return ret; -} - -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0 0x48 -#define I40E_TR_VXLAN_GRE_KEY_MASK 0x4 -#define I40E_TR_GENEVE_KEY_MASK 0x8 -#define I40E_TR_GENERIC_UDP_TUNNEL_MASK 0x40 -#define I40E_TR_GRE_KEY_MASK 0x400 -#define I40E_TR_GRE_KEY_WITH_XSUM_MASK 0x800 -#define I40E_TR_GRE_NO_KEY_MASK 0x8000 - -static enum -i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf) -{ - struct i40e_aqc_replace_cloud_filters_cmd filter_replace; - struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf; - struct i40e_hw *hw = I40E_PF_TO_HW(pf); - struct rte_eth_dev *dev = 
((struct i40e_adapter *)hw->back)->eth_dev; - enum i40e_status_code status = I40E_SUCCESS; - - if (pf->support_multi_driver) { - PMD_DRV_LOG(ERR, "Replace l1 filter is not supported."); - return I40E_NOT_SUPPORTED; - } - - memset(&filter_replace, 0, - sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); - memset(&filter_replace_buf, 0, - sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf)); - - /* create L1 filter */ - filter_replace.old_filter_type = - I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC; - filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11; - filter_replace.tr_bit = 0; + /* create L1 filter */ + filter_replace.old_filter_type = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC; + filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11; + filter_replace.tr_bit = 0; /* Prepare the buffer, 3 entries */ filter_replace_buf.data[0] = @@ -7990,7 +7914,7 @@ i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf) struct i40e_aqc_replace_cloud_filters_cmd filter_replace; struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf; struct i40e_hw *hw = I40E_PF_TO_HW(pf); - struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev; + struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id]; enum i40e_status_code status = I40E_SUCCESS; if (pf->support_multi_driver) { @@ -8065,7 +7989,7 @@ i40e_replace_gtp_l1_filter(struct i40e_pf *pf) struct i40e_aqc_replace_cloud_filters_cmd filter_replace; struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf; struct i40e_hw *hw = I40E_PF_TO_HW(pf); - struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev; + struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id]; enum i40e_status_code status = I40E_SUCCESS; if (pf->support_multi_driver) { @@ -8153,7 +8077,7 @@ i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf) struct i40e_aqc_replace_cloud_filters_cmd filter_replace; struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf; struct i40e_hw *hw = I40E_PF_TO_HW(pf); - struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev; + struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id]; enum i40e_status_code status = I40E_SUCCESS; if (pf->support_multi_driver) { @@ -8220,6 +8144,132 @@ i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf) return status; } +static enum i40e_status_code +i40e_replace_port_l1_filter(struct i40e_pf *pf, + enum i40e_l4_port_type l4_port_type) +{ + struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf; + struct i40e_aqc_replace_cloud_filters_cmd filter_replace; + enum i40e_status_code status = I40E_SUCCESS; + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id]; + + if (pf->support_multi_driver) { + PMD_DRV_LOG(ERR, "Replace l1 filter is not supported."); + return I40E_NOT_SUPPORTED; + } + + memset(&filter_replace, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); + memset(&filter_replace_buf, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf)); + + /* create L1 filter */ + if (l4_port_type == I40E_L4_PORT_TYPE_SRC) { + filter_replace.old_filter_type = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY; + filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11; + filter_replace_buf.data[8] = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_SRC_PORT; + } else { + filter_replace.old_filter_type = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN; + filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X10; + 
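/* data[8] carries the field-vector selector the new L1 filter keys
 * on: the destination L4 port in this branch, FV_SRC_PORT in the
 * source-port branch above. */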
filter_replace_buf.data[8] = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_DST_PORT; + } + + filter_replace.tr_bit = 0; + /* Prepare the buffer, 3 entries */ + filter_replace_buf.data[0] = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_DIRECTION_WORD0; + filter_replace_buf.data[0] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[2] = 0x00; + filter_replace_buf.data[3] = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_INGRESS_WORD0; + filter_replace_buf.data[4] = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_PORT_TR_WORD0; + filter_replace_buf.data[4] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[5] = 0x00; + filter_replace_buf.data[6] = I40E_TR_L4_TYPE_UDP | + I40E_TR_L4_TYPE_TCP | + I40E_TR_L4_TYPE_SCTP; + filter_replace_buf.data[7] = 0x00; + filter_replace_buf.data[8] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[9] = 0x00; + filter_replace_buf.data[10] = 0xFF; + filter_replace_buf.data[11] = 0xFF; + + status = i40e_aq_replace_cloud_filters(hw, &filter_replace, + &filter_replace_buf); + if (!status && filter_replace.old_filter_type != + filter_replace.new_filter_type) + PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type." + " original: 0x%x, new: 0x%x", + dev->device->name, + filter_replace.old_filter_type, + filter_replace.new_filter_type); + + return status; +} + +static enum i40e_status_code +i40e_replace_port_cloud_filter(struct i40e_pf *pf, + enum i40e_l4_port_type l4_port_type) +{ + struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf; + struct i40e_aqc_replace_cloud_filters_cmd filter_replace; + enum i40e_status_code status = I40E_SUCCESS; + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id]; + + if (pf->support_multi_driver) { + PMD_DRV_LOG(ERR, "Replace cloud filter is not supported."); + return I40E_NOT_SUPPORTED; + } + + memset(&filter_replace, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); + memset(&filter_replace_buf, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf)); + + if (l4_port_type == I40E_L4_PORT_TYPE_SRC) { + filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP; + filter_replace.new_filter_type = + I40E_AQC_ADD_CLOUD_FILTER_0X11; + filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X11; + } else { + filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP; + filter_replace.new_filter_type = + I40E_AQC_ADD_CLOUD_FILTER_0X10; + filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10; + } + + filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER; + filter_replace.tr_bit = 0; + /* Prepare the buffer, 2 entries */ + filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG; + filter_replace_buf.data[0] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[4] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + status = i40e_aq_replace_cloud_filters(hw, &filter_replace, + &filter_replace_buf); + + if (!status && filter_replace.old_filter_type != + filter_replace.new_filter_type) + PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type." 
+ " original: 0x%x, new: 0x%x", + dev->device->name, + filter_replace.old_filter_type, + filter_replace.new_filter_type); + + return status; +} + int i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf, struct i40e_tunnel_filter_conf *tunnel_filter, @@ -8367,6 +8417,62 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf, pfilter->general_fields[0] = tunnel_filter->inner_vlan; pfilter->general_fields[1] = tunnel_filter->outer_vlan; big_buffer = 1; + break; + case I40E_CLOUD_TYPE_UDP: + case I40E_CLOUD_TYPE_TCP: + case I40E_CLOUD_TYPE_SCTP: + if (tunnel_filter->l4_port_type == I40E_L4_PORT_TYPE_SRC) { + if (!pf->sport_replace_flag) { + i40e_replace_port_l1_filter(pf, + tunnel_filter->l4_port_type); + i40e_replace_port_cloud_filter(pf, + tunnel_filter->l4_port_type); + pf->sport_replace_flag = 1; + } + teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id); + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] = + I40E_DIRECTION_INGRESS_KEY; + + if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP) + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] = + I40E_TR_L4_TYPE_UDP; + else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP) + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] = + I40E_TR_L4_TYPE_TCP; + else + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] = + I40E_TR_L4_TYPE_SCTP; + + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] = + (teid_le >> 16) & 0xFFFF; + big_buffer = 1; + } else { + if (!pf->dport_replace_flag) { + i40e_replace_port_l1_filter(pf, + tunnel_filter->l4_port_type); + i40e_replace_port_cloud_filter(pf, + tunnel_filter->l4_port_type); + pf->dport_replace_flag = 1; + } + teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id); + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0] = + I40E_DIRECTION_INGRESS_KEY; + + if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP) + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] = + I40E_TR_L4_TYPE_UDP; + else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP) + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] = + I40E_TR_L4_TYPE_TCP; + else + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] = + I40E_TR_L4_TYPE_SCTP; + + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2] = + (teid_le >> 16) & 0xFFFF; + big_buffer = 1; + } + break; default: /* Other tunnel types is not supported. 
*/ @@ -8390,7 +8496,16 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf, else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ) pfilter->element.flags |= I40E_AQC_ADD_CLOUD_FILTER_0X10; - else { + else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP || + tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP || + tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_SCTP) { + if (tunnel_filter->l4_port_type == I40E_L4_PORT_TYPE_SRC) + pfilter->element.flags |= + I40E_AQC_ADD_CLOUD_FILTER_0X11; + else + pfilter->element.flags |= + I40E_AQC_ADD_CLOUD_FILTER_0X10; + } else { val = i40e_dev_get_filter_type(tunnel_filter->filter_type, &pfilter->element.flags); if (val < 0) { @@ -8636,7 +8751,7 @@ i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, } /* Calculate the maximum number of contiguous PF queues that are configured */ -static int +int i40e_pf_calc_configured_queues_num(struct i40e_pf *pf) { struct rte_eth_dev_data *data = pf->dev_data; @@ -8655,18 +8770,72 @@ i40e_pf_calc_configured_queues_num(struct i40e_pf *pf) return num; } -/* Configure RSS */ -static int -i40e_pf_config_rss(struct i40e_pf *pf) +/* Reset the global configure of hash function and input sets */ +static void +i40e_pf_global_rss_reset(struct i40e_pf *pf) { struct i40e_hw *hw = I40E_PF_TO_HW(pf); - struct rte_eth_rss_conf rss_conf; - uint32_t i, lut = 0; - uint16_t j, num; + uint32_t reg, reg_val; + int i; - /* - * If both VMDQ and RSS enabled, not all of PF queues are configured. - * It's necessary to calculate the actual PF queues that are configured. + /* Reset global RSS function sets */ + reg_val = i40e_read_rx_ctl(hw, I40E_GLQF_CTL); + if (!(reg_val & I40E_GLQF_CTL_HTOEP_MASK)) { + reg_val |= I40E_GLQF_CTL_HTOEP_MASK; + i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg_val); + } + + for (i = 0; i <= I40E_FILTER_PCTYPE_L2_PAYLOAD; i++) { + uint64_t inset; + int j, pctype; + + if (hw->mac.type == I40E_MAC_X722) + pctype = i40e_read_rx_ctl(hw, I40E_GLQF_FD_PCTYPES(i)); + else + pctype = i; + + /* Reset pctype insets */ + inset = i40e_get_default_input_set(i); + if (inset) { + pf->hash_input_set[pctype] = inset; + inset = i40e_translate_input_set_reg(hw->mac.type, + inset); + + reg = I40E_GLQF_HASH_INSET(0, pctype); + i40e_check_write_global_reg(hw, reg, (uint32_t)inset); + reg = I40E_GLQF_HASH_INSET(1, pctype); + i40e_check_write_global_reg(hw, reg, + (uint32_t)(inset >> 32)); + + /* Clear unused mask registers of the pctype */ + for (j = 0; j < I40E_INSET_MASK_NUM_REG; j++) { + reg = I40E_GLQF_HASH_MSK(j, pctype); + i40e_check_write_global_reg(hw, reg, 0); + } + } + + /* Reset pctype symmetric sets */ + reg = I40E_GLQF_HSYM(pctype); + reg_val = i40e_read_rx_ctl(hw, reg); + if (reg_val & I40E_GLQF_HSYM_SYMH_ENA_MASK) { + reg_val &= ~I40E_GLQF_HSYM_SYMH_ENA_MASK; + i40e_write_global_rx_ctl(hw, reg, reg_val); + } + } + I40E_WRITE_FLUSH(hw); +} + +int +i40e_pf_reset_rss_reta(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->adapter->hw; + uint8_t lut[ETH_RSS_RETA_SIZE_512]; + uint32_t i; + int num; + + /* If both VMDQ and RSS enabled, not all of PF queues are + * configured. It's necessary to calculate the actual PF + * queues that are configured. 
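* (i40e_pf_calc_configured_queues_num() just below returns the
* number of contiguous PF queues that have been set up, which is
* then capped at I40E_MAX_Q_PER_TC.)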
*/ if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) num = i40e_pf_calc_configured_queues_num(pf); @@ -8674,84 +8843,94 @@ i40e_pf_config_rss(struct i40e_pf *pf) num = pf->dev_data->nb_rx_queues; num = RTE_MIN(num, I40E_MAX_Q_PER_TC); - PMD_INIT_LOG(INFO, "Max of contiguous %u PF queues are configured", - num); + if (num <= 0) + return 0; - if (num == 0) { - PMD_INIT_LOG(ERR, "No PF queues are configured to enable RSS"); - return -ENOTSUP; - } + for (i = 0; i < hw->func_caps.rss_table_size; i++) + lut[i] = (uint8_t)(i % (uint32_t)num); - if (pf->adapter->rss_reta_updated == 0) { - for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) { - if (j == num) - j = 0; - lut = (lut << 8) | (j & ((0x1 << - hw->func_caps.rss_table_entry_width) - 1)); - if ((i & 3) == 3) - I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), - rte_bswap32(lut)); - } - } + return i40e_set_rss_lut(pf->main_vsi, lut, (uint16_t)i); +} - rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf; - if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) { - i40e_pf_disable_rss(pf); - return 0; - } - if (rss_conf.rss_key == NULL || rss_conf.rss_key_len < - (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) { - /* Random default keys */ +int +i40e_pf_reset_rss_key(struct i40e_pf *pf) +{ + const uint8_t key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) * + sizeof(uint32_t); + uint8_t *rss_key; + + /* Reset key */ + rss_key = pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_key; + if (!rss_key || + pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_key_len < key_len) { static uint32_t rss_key_default[] = {0x6b793944, 0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8, 0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605, 0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581}; - rss_conf.rss_key = (uint8_t *)rss_key_default; - rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) * - sizeof(uint32_t); + rss_key = (uint8_t *)rss_key_default; } - return i40e_hw_rss_hash_set(pf, &rss_conf); + return i40e_set_rss_key(pf->main_vsi, rss_key, key_len); } static int -i40e_tunnel_filter_param_check(struct i40e_pf *pf, - struct rte_eth_tunnel_filter_conf *filter) +i40e_pf_rss_reset(struct i40e_pf *pf) { - if (pf == NULL || filter == NULL) { - PMD_DRV_LOG(ERR, "Invalid parameter"); - return -EINVAL; - } + struct i40e_hw *hw = I40E_PF_TO_HW(pf); - if (filter->queue_id >= pf->dev_data->nb_rx_queues) { - PMD_DRV_LOG(ERR, "Invalid queue ID"); - return -EINVAL; - } + int ret; - if (filter->inner_vlan > RTE_ETHER_MAX_VLAN_ID) { - PMD_DRV_LOG(ERR, "Invalid inner VLAN ID"); - return -EINVAL; - } + pf->hash_filter_enabled = 0; + i40e_pf_disable_rss(pf); + i40e_set_symmetric_hash_enable_per_port(hw, 0); - if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) && - (rte_is_zero_ether_addr(&filter->outer_mac))) { - PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address"); - return -EINVAL; + if (!pf->support_multi_driver) + i40e_pf_global_rss_reset(pf); + + /* Reset RETA table */ + if (pf->adapter->rss_reta_updated == 0) { + ret = i40e_pf_reset_rss_reta(pf); + if (ret) + return ret; } - if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) && - (rte_is_zero_ether_addr(&filter->inner_mac))) { - PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address"); - return -EINVAL; + return i40e_pf_reset_rss_key(pf); +} + +/* Configure RSS */ +int +i40e_pf_config_rss(struct i40e_pf *pf) +{ + struct i40e_hw *hw; + enum rte_eth_rx_mq_mode mq_mode; + uint64_t rss_hf, hena; + int ret; + + ret = i40e_pf_rss_reset(pf); + if (ret) { + PMD_DRV_LOG(ERR, "Reset RSS failed, RSS has been disabled"); + 
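/* The reset path disables RSS before touching the key and RETA, so
 * failing here leaves the port with RSS off rather than with a stale,
 * half-applied hash configuration. */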
return ret; } + rss_hf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_hf; + mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode; + if (!(rss_hf & pf->adapter->flow_types_mask) || + !(mq_mode & ETH_MQ_RX_RSS_FLAG)) + return 0; + + hw = I40E_PF_TO_HW(pf); + hena = i40e_config_hena(pf->adapter, rss_hf); + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena); + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32)); + I40E_WRITE_FLUSH(hw); + return 0; } #define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000 #define I40E_GL_PRS_FVBM(_i) (0x00269760 + ((_i) * 4)) -static int +int i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len) { struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf; @@ -8793,294 +8972,27 @@ i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len) return ret; } -static int -i40e_dev_global_config_set(struct i40e_hw *hw, struct rte_eth_global_cfg *cfg) -{ - int ret = -EINVAL; - - if (!hw || !cfg) - return -EINVAL; - - switch (cfg->cfg_type) { - case RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN: - ret = i40e_dev_set_gre_key_len(hw, cfg->cfg.gre_key_len); - break; - default: - PMD_DRV_LOG(ERR, "Unknown config type %u", cfg->cfg_type); - break; - } - - return ret; -} - -static int -i40e_filter_ctrl_global_config(struct rte_eth_dev *dev, - enum rte_filter_op filter_op, - void *arg) -{ - struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - int ret = I40E_ERR_PARAM; - - switch (filter_op) { - case RTE_ETH_FILTER_SET: - ret = i40e_dev_global_config_set(hw, - (struct rte_eth_global_cfg *)arg); - break; - default: - PMD_DRV_LOG(ERR, "unknown operation %u", filter_op); - break; - } - - return ret; -} - -static int -i40e_tunnel_filter_handle(struct rte_eth_dev *dev, - enum rte_filter_op filter_op, - void *arg) -{ - struct rte_eth_tunnel_filter_conf *filter; - struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); - int ret = I40E_SUCCESS; - - filter = (struct rte_eth_tunnel_filter_conf *)(arg); - - if (i40e_tunnel_filter_param_check(pf, filter) < 0) - return I40E_ERR_PARAM; - - switch (filter_op) { - case RTE_ETH_FILTER_NOP: - if (!(pf->flags & I40E_FLAG_VXLAN)) - ret = I40E_NOT_SUPPORTED; - break; - case RTE_ETH_FILTER_ADD: - ret = i40e_dev_tunnel_filter_set(pf, filter, 1); - break; - case RTE_ETH_FILTER_DELETE: - ret = i40e_dev_tunnel_filter_set(pf, filter, 0); - break; - default: - PMD_DRV_LOG(ERR, "unknown operation %u", filter_op); - ret = I40E_ERR_PARAM; - break; - } - - return ret; -} - -static int -i40e_pf_config_mq_rx(struct i40e_pf *pf) -{ - int ret = 0; - enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode; - - /* RSS setup */ - if (mq_mode & ETH_MQ_RX_RSS_FLAG) - ret = i40e_pf_config_rss(pf); - else - i40e_pf_disable_rss(pf); - - return ret; -} - -/* Get the symmetric hash enable configurations per port */ -static void -i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable) -{ - uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0); - - *enable = reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK ? 
1 : 0; -} - /* Set the symmetric hash enable configurations per port */ -static void +void i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable) { uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0); if (enable > 0) { - if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) { - PMD_DRV_LOG(INFO, - "Symmetric hash has already been enabled"); + if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) return; - } + reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK; } else { - if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) { - PMD_DRV_LOG(INFO, - "Symmetric hash has already been disabled"); + if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) return; - } + reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK; } i40e_write_rx_ctl(hw, I40E_PRTQF_CTL_0, reg); I40E_WRITE_FLUSH(hw); } -/* - * Get global configurations of hash function type and symmetric hash enable - * per flow type (pctype). Note that global configuration means it affects all - * the ports on the same NIC. - */ -static int -i40e_get_hash_filter_global_config(struct i40e_hw *hw, - struct rte_eth_hash_global_conf *g_cfg) -{ - struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back; - uint32_t reg; - uint16_t i, j; - - memset(g_cfg, 0, sizeof(*g_cfg)); - reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL); - if (reg & I40E_GLQF_CTL_HTOEP_MASK) - g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ; - else - g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR; - PMD_DRV_LOG(DEBUG, "Hash function is %s", - (reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR"); - - /* - * As i40e supports less than 64 flow types, only first 64 bits need to - * be checked. - */ - for (i = 1; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) { - g_cfg->valid_bit_mask[i] = 0ULL; - g_cfg->sym_hash_enable_mask[i] = 0ULL; - } - - g_cfg->valid_bit_mask[0] = adapter->flow_types_mask; - - for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) { - if (!adapter->pctypes_tbl[i]) - continue; - for (j = I40E_FILTER_PCTYPE_INVALID + 1; - j < I40E_FILTER_PCTYPE_MAX; j++) { - if (adapter->pctypes_tbl[i] & (1ULL << j)) { - reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(j)); - if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK) { - g_cfg->sym_hash_enable_mask[0] |= - (1ULL << i); - } - } - } - } - - return 0; -} - -static int -i40e_hash_global_config_check(const struct i40e_adapter *adapter, - const struct rte_eth_hash_global_conf *g_cfg) -{ - uint32_t i; - uint64_t mask0, i40e_mask = adapter->flow_types_mask; - - if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ && - g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR && - g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_DEFAULT) { - PMD_DRV_LOG(ERR, "Unsupported hash function type %d", - g_cfg->hash_func); - return -EINVAL; - } - - /* - * As i40e supports less than 64 flow types, only first 64 bits need to - * be checked. - */ - mask0 = g_cfg->valid_bit_mask[0]; - for (i = 0; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) { - if (i == 0) { - /* Check if any unsupported flow type configured */ - if ((mask0 | i40e_mask) ^ i40e_mask) - goto mask_err; - } else { - if (g_cfg->valid_bit_mask[i]) - goto mask_err; - } - } - - return 0; - -mask_err: - PMD_DRV_LOG(ERR, "i40e unsupported flow type bit(s) configured"); - - return -EINVAL; -} - -/* - * Set global configurations of hash function type and symmetric hash enable - * per flow type (pctype). Note any modifying global configuration will affect - * all the ports on the same NIC. 
- */ -static int -i40e_set_hash_filter_global_config(struct i40e_hw *hw, - struct rte_eth_hash_global_conf *g_cfg) -{ - struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back; - struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf; - int ret; - uint16_t i, j; - uint32_t reg; - uint64_t mask0 = g_cfg->valid_bit_mask[0] & adapter->flow_types_mask; - - if (pf->support_multi_driver) { - PMD_DRV_LOG(ERR, "Hash global configuration is not supported."); - return -ENOTSUP; - } - - /* Check the input parameters */ - ret = i40e_hash_global_config_check(adapter, g_cfg); - if (ret < 0) - return ret; - - /* - * As i40e supports less than 64 flow types, only first 64 bits need to - * be configured. - */ - for (i = RTE_ETH_FLOW_UNKNOWN + 1; mask0 && i < UINT64_BIT; i++) { - if (mask0 & (1UL << i)) { - reg = (g_cfg->sym_hash_enable_mask[0] & (1ULL << i)) ? - I40E_GLQF_HSYM_SYMH_ENA_MASK : 0; - - for (j = I40E_FILTER_PCTYPE_INVALID + 1; - j < I40E_FILTER_PCTYPE_MAX; j++) { - if (adapter->pctypes_tbl[i] & (1ULL << j)) - i40e_write_global_rx_ctl(hw, - I40E_GLQF_HSYM(j), - reg); - } - } - } - - reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL); - if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) { - /* Toeplitz */ - if (reg & I40E_GLQF_CTL_HTOEP_MASK) { - PMD_DRV_LOG(DEBUG, - "Hash function already set to Toeplitz"); - goto out; - } - reg |= I40E_GLQF_CTL_HTOEP_MASK; - } else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) { - /* Simple XOR */ - if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) { - PMD_DRV_LOG(DEBUG, - "Hash function already set to Simple XOR"); - goto out; - } - reg &= ~I40E_GLQF_CTL_HTOEP_MASK; - } else - /* Use the default, and keep it as it is */ - goto out; - - i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg); - -out: - I40E_WRITE_FLUSH(hw); - - return 0; -} - /** * Valid input sets for hash and flow director filters per PCTYPE */ @@ -9248,6 +9160,7 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype, I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL, [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = + I40E_INSET_DMAC | I40E_INSET_SMAC | I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL | @@ -9263,6 +9176,7 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype, I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = + I40E_INSET_DMAC | I40E_INSET_SMAC | I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL | @@ -9279,6 +9193,7 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype, I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT, [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] = + I40E_INSET_DMAC | I40E_INSET_SMAC | I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO | @@ -9416,103 +9331,6 @@ i40e_get_default_input_set(uint16_t pctype) return default_inset_table[pctype]; } -/** - * Parse the input set from index to logical bit masks - */ -static int -i40e_parse_input_set(uint64_t *inset, - enum i40e_filter_pctype pctype, - enum rte_eth_input_set_field *field, - uint16_t size) -{ - uint16_t i, j; - int ret = -EINVAL; - - static const struct { - enum rte_eth_input_set_field field; - uint64_t inset; - } inset_convert_table[] = { - {RTE_ETH_INPUT_SET_NONE, I40E_INSET_NONE}, - {RTE_ETH_INPUT_SET_L2_SRC_MAC, I40E_INSET_SMAC}, - 
{RTE_ETH_INPUT_SET_L2_DST_MAC, I40E_INSET_DMAC}, - {RTE_ETH_INPUT_SET_L2_OUTER_VLAN, I40E_INSET_VLAN_OUTER}, - {RTE_ETH_INPUT_SET_L2_INNER_VLAN, I40E_INSET_VLAN_INNER}, - {RTE_ETH_INPUT_SET_L2_ETHERTYPE, I40E_INSET_LAST_ETHER_TYPE}, - {RTE_ETH_INPUT_SET_L3_SRC_IP4, I40E_INSET_IPV4_SRC}, - {RTE_ETH_INPUT_SET_L3_DST_IP4, I40E_INSET_IPV4_DST}, - {RTE_ETH_INPUT_SET_L3_IP4_TOS, I40E_INSET_IPV4_TOS}, - {RTE_ETH_INPUT_SET_L3_IP4_PROTO, I40E_INSET_IPV4_PROTO}, - {RTE_ETH_INPUT_SET_L3_IP4_TTL, I40E_INSET_IPV4_TTL}, - {RTE_ETH_INPUT_SET_L3_SRC_IP6, I40E_INSET_IPV6_SRC}, - {RTE_ETH_INPUT_SET_L3_DST_IP6, I40E_INSET_IPV6_DST}, - {RTE_ETH_INPUT_SET_L3_IP6_TC, I40E_INSET_IPV6_TC}, - {RTE_ETH_INPUT_SET_L3_IP6_NEXT_HEADER, - I40E_INSET_IPV6_NEXT_HDR}, - {RTE_ETH_INPUT_SET_L3_IP6_HOP_LIMITS, - I40E_INSET_IPV6_HOP_LIMIT}, - {RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT, I40E_INSET_SRC_PORT}, - {RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT, I40E_INSET_SRC_PORT}, - {RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT, I40E_INSET_SRC_PORT}, - {RTE_ETH_INPUT_SET_L4_UDP_DST_PORT, I40E_INSET_DST_PORT}, - {RTE_ETH_INPUT_SET_L4_TCP_DST_PORT, I40E_INSET_DST_PORT}, - {RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT, I40E_INSET_DST_PORT}, - {RTE_ETH_INPUT_SET_L4_SCTP_VERIFICATION_TAG, - I40E_INSET_SCTP_VT}, - {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_DST_MAC, - I40E_INSET_TUNNEL_DMAC}, - {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_VLAN, - I40E_INSET_VLAN_TUNNEL}, - {RTE_ETH_INPUT_SET_TUNNEL_L4_UDP_KEY, - I40E_INSET_TUNNEL_ID}, - {RTE_ETH_INPUT_SET_TUNNEL_GRE_KEY, I40E_INSET_TUNNEL_ID}, - {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_1ST_WORD, - I40E_INSET_FLEX_PAYLOAD_W1}, - {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_2ND_WORD, - I40E_INSET_FLEX_PAYLOAD_W2}, - {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_3RD_WORD, - I40E_INSET_FLEX_PAYLOAD_W3}, - {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_4TH_WORD, - I40E_INSET_FLEX_PAYLOAD_W4}, - {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_5TH_WORD, - I40E_INSET_FLEX_PAYLOAD_W5}, - {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_6TH_WORD, - I40E_INSET_FLEX_PAYLOAD_W6}, - {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_7TH_WORD, - I40E_INSET_FLEX_PAYLOAD_W7}, - {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_8TH_WORD, - I40E_INSET_FLEX_PAYLOAD_W8}, - }; - - if (!inset || !field || size > RTE_ETH_INSET_SIZE_MAX) - return ret; - - /* Only one item allowed for default or all */ - if (size == 1) { - if (field[0] == RTE_ETH_INPUT_SET_DEFAULT) { - *inset = i40e_get_default_input_set(pctype); - return 0; - } else if (field[0] == RTE_ETH_INPUT_SET_NONE) { - *inset = I40E_INSET_NONE; - return 0; - } - } - - for (i = 0, *inset = 0; i < size; i++) { - for (j = 0; j < RTE_DIM(inset_convert_table); j++) { - if (field[i] == inset_convert_table[j].field) { - *inset |= inset_convert_table[j].inset; - break; - } - } - - /* It contains unsupported input set, return immediately */ - if (j == RTE_DIM(inset_convert_table)) - return ret; - } - - return 0; -} - /** * Translate the input set from bit masks to register aware bit masks * and vice versa @@ -9602,51 +9420,118 @@ i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input) return val; } +static int +i40e_get_inset_field_offset(struct i40e_hw *hw, uint32_t pit_reg_start, + uint32_t pit_reg_count, uint32_t hdr_off) +{ + const uint32_t pit_reg_end = pit_reg_start + pit_reg_count; + uint32_t field_off = I40E_FDIR_FIELD_OFFSET(hdr_off); + uint32_t i, reg_val, src_off, count; + + for (i = pit_reg_start; i < pit_reg_end; i++) { + reg_val = i40e_read_rx_ctl(hw, I40E_GLQF_PIT(i)); + + src_off = I40E_GLQF_PIT_SOURCE_OFF_GET(reg_val); + count = I40E_GLQF_PIT_FSIZE_GET(reg_val); + + if (src_off <= field_off && (src_off + 
count) > field_off) + break; + } + + if (i >= pit_reg_end) { + PMD_DRV_LOG(ERR, + "Hardware GLQF_PIT configuration does not support this field mask"); + return -1; + } + + return I40E_GLQF_PIT_DEST_OFF_GET(reg_val) + field_off - src_off; +} + int -i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem) +i40e_generate_inset_mask_reg(struct i40e_hw *hw, uint64_t inset, + uint32_t *mask, uint8_t nb_elem) { - uint8_t i, idx = 0; - uint64_t inset_need_mask = inset; + static const uint64_t mask_inset[] = { + I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL, + I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT }; static const struct { uint64_t inset; uint32_t mask; - } inset_mask_map[] = { - {I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK}, - {I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL, 0}, - {I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK}, - {I40E_INSET_IPV4_TTL, I40E_INSET_IPv4_TTL_MASK}, - {I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK}, - {I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT, 0}, - {I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK}, - {I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK}, + uint32_t offset; + } inset_mask_offset_map[] = { + { I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK, + offsetof(struct rte_ipv4_hdr, type_of_service) }, + + { I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK, + offsetof(struct rte_ipv4_hdr, next_proto_id) }, + + { I40E_INSET_IPV4_TTL, I40E_INSET_IPV4_TTL_MASK, + offsetof(struct rte_ipv4_hdr, time_to_live) }, + + { I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK, + offsetof(struct rte_ipv6_hdr, vtc_flow) }, + + { I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK, + offsetof(struct rte_ipv6_hdr, proto) }, + + { I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK, + offsetof(struct rte_ipv6_hdr, hop_limits) }, }; - if (!inset || !mask || !nb_elem) + uint32_t i; + int idx = 0; + + assert(mask); + if (!inset) return 0; - for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) { + for (i = 0; i < RTE_DIM(mask_inset); i++) { /* Clear the inset bit, if no MASK is required, * for example proto + ttl */ - if ((inset & inset_mask_map[i].inset) == - inset_mask_map[i].inset && inset_mask_map[i].mask == 0) - inset_need_mask &= ~inset_mask_map[i].inset; - if (!inset_need_mask) - return 0; - } - for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) { - if ((inset_need_mask & inset_mask_map[i].inset) == - inset_mask_map[i].inset) { - if (idx >= nb_elem) { - PMD_DRV_LOG(ERR, "exceed maximal number of bitmasks"); - return -EINVAL; - } - mask[idx] = inset_mask_map[i].mask; - idx++; + if ((mask_inset[i] & inset) == mask_inset[i]) { + inset &= ~mask_inset[i]; + if (!inset) + return 0; } } + for (i = 0; i < RTE_DIM(inset_mask_offset_map); i++) { + uint32_t pit_start, pit_count; + int offset; + + if (!(inset_mask_offset_map[i].inset & inset)) + continue; + + if (inset_mask_offset_map[i].inset & + (I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO | + I40E_INSET_IPV4_TTL)) { + pit_start = I40E_GLQF_PIT_IPV4_START; + pit_count = I40E_GLQF_PIT_IPV4_COUNT; + } else { + pit_start = I40E_GLQF_PIT_IPV6_START; + pit_count = I40E_GLQF_PIT_IPV6_COUNT; + } + + offset = i40e_get_inset_field_offset(hw, pit_start, pit_count, + inset_mask_offset_map[i].offset); + + if (offset < 0) + return -EINVAL; + + if (idx >= nb_elem) { + PMD_DRV_LOG(ERR, + "Configuration of inset mask out of range %u", + nb_elem); + return -ERANGE; + } + + mask[idx] = I40E_GLQF_PIT_BUILD((uint32_t)offset, + inset_mask_offset_map[i].mask); + idx++; + } + return idx; } @@ 
-9666,9 +9551,10 @@ void i40e_check_write_global_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val) { uint32_t reg = i40e_read_rx_ctl(hw, addr); - struct rte_eth_dev *dev; + struct rte_eth_dev_data *dev_data = + ((struct i40e_adapter *)hw->back)->pf.dev_data; + struct rte_eth_dev *dev = &rte_eth_devices[dev_data->port_id]; - dev = ((struct i40e_adapter *)hw->back)->eth_dev; if (reg != val) { i40e_write_rx_ctl(hw, addr, val); PMD_DRV_LOG(WARNING, @@ -9698,7 +9584,7 @@ i40e_filter_input_set_init(struct i40e_pf *pf) input_set = i40e_get_default_input_set(pctype); - num = i40e_generate_inset_mask_reg(input_set, mask_reg, + num = i40e_generate_inset_mask_reg(hw, input_set, mask_reg, I40E_INSET_MASK_NUM_REG); if (num < 0) return; @@ -9753,57 +9639,32 @@ i40e_filter_input_set_init(struct i40e_pf *pf) } int -i40e_hash_filter_inset_select(struct i40e_hw *hw, - struct rte_eth_input_set_conf *conf) +i40e_set_hash_inset(struct i40e_hw *hw, uint64_t input_set, + uint32_t pctype, bool add) { struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf; - enum i40e_filter_pctype pctype; - uint64_t input_set, inset_reg = 0; uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0}; - int ret, i, num; - - if (!conf) { - PMD_DRV_LOG(ERR, "Invalid pointer"); - return -EFAULT; - } - if (conf->op != RTE_ETH_INPUT_SET_SELECT && - conf->op != RTE_ETH_INPUT_SET_ADD) { - PMD_DRV_LOG(ERR, "Unsupported input set operation"); - return -EINVAL; - } + uint64_t inset_reg = 0; + int num, i; if (pf->support_multi_driver) { - PMD_DRV_LOG(ERR, "Hash input set setting is not supported."); - return -ENOTSUP; - } - - pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type); - if (pctype == I40E_FILTER_PCTYPE_INVALID) { - PMD_DRV_LOG(ERR, "invalid flow_type input."); - return -EINVAL; - } - - if (hw->mac.type == I40E_MAC_X722) { - /* get translated pctype value in fd pctype register */ - pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(hw, - I40E_GLQF_FD_PCTYPES((int)pctype)); + PMD_DRV_LOG(ERR, + "Modify input set is not permitted when multi-driver enabled."); + return -EPERM; } - ret = i40e_parse_input_set(&input_set, pctype, conf->field, - conf->inset_size); - if (ret) { - PMD_DRV_LOG(ERR, "Failed to parse input set"); - return -EINVAL; - } + /* For X722, get translated pctype in fd pctype register */ + if (hw->mac.type == I40E_MAC_X722) + pctype = i40e_read_rx_ctl(hw, I40E_GLQF_FD_PCTYPES(pctype)); - if (conf->op == RTE_ETH_INPUT_SET_ADD) { + if (add) { /* get inset value in register */ inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype)); inset_reg <<= I40E_32_BIT_WIDTH; inset_reg |= i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype)); input_set |= pf->hash_input_set[pctype]; } - num = i40e_generate_inset_mask_reg(input_set, mask_reg, + num = i40e_generate_inset_mask_reg(hw, input_set, mask_reg, I40E_INSET_MASK_NUM_REG); if (num < 0) return -EINVAL; @@ -9825,183 +9686,8 @@ i40e_hash_filter_inset_select(struct i40e_hw *hw, 0); I40E_WRITE_FLUSH(hw); - pf->hash_input_set[pctype] = input_set; - return 0; -} - -int -i40e_fdir_filter_inset_select(struct i40e_pf *pf, - struct rte_eth_input_set_conf *conf) -{ - struct i40e_hw *hw = I40E_PF_TO_HW(pf); - enum i40e_filter_pctype pctype; - uint64_t input_set, inset_reg = 0; - uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0}; - int ret, i, num; - - if (!hw || !conf) { - PMD_DRV_LOG(ERR, "Invalid pointer"); - return -EFAULT; - } - if (conf->op != RTE_ETH_INPUT_SET_SELECT && - conf->op != RTE_ETH_INPUT_SET_ADD) { - PMD_DRV_LOG(ERR, "Unsupported input set operation"); - 
return -EINVAL; - } - - pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type); - - if (pctype == I40E_FILTER_PCTYPE_INVALID) { - PMD_DRV_LOG(ERR, "invalid flow_type input."); - return -EINVAL; - } - - ret = i40e_parse_input_set(&input_set, pctype, conf->field, - conf->inset_size); - if (ret) { - PMD_DRV_LOG(ERR, "Failed to parse input set"); - return -EINVAL; - } - - /* get inset value in register */ - inset_reg = i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1)); - inset_reg <<= I40E_32_BIT_WIDTH; - inset_reg |= i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0)); - - /* Can not change the inset reg for flex payload for fdir, - * it is done by writing I40E_PRTQF_FD_FLXINSET - * in i40e_set_flex_mask_on_pctype. - */ - if (conf->op == RTE_ETH_INPUT_SET_SELECT) - inset_reg &= I40E_REG_INSET_FLEX_PAYLOAD_WORDS; - else - input_set |= pf->fdir.input_set[pctype]; - num = i40e_generate_inset_mask_reg(input_set, mask_reg, - I40E_INSET_MASK_NUM_REG); - if (num < 0) - return -EINVAL; - if (pf->support_multi_driver && num > 0) { - PMD_DRV_LOG(ERR, "FDIR bit mask is not supported."); - return -ENOTSUP; - } - - inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set); - - i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0), - (uint32_t)(inset_reg & UINT32_MAX)); - i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1), - (uint32_t)((inset_reg >> - I40E_32_BIT_WIDTH) & UINT32_MAX)); - - if (!pf->support_multi_driver) { - for (i = 0; i < num; i++) - i40e_check_write_global_reg(hw, - I40E_GLQF_FD_MSK(i, pctype), - mask_reg[i]); - /*clear unused mask registers of the pctype */ - for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) - i40e_check_write_global_reg(hw, - I40E_GLQF_FD_MSK(i, pctype), - 0); - } else { - PMD_DRV_LOG(ERR, "FDIR bit mask is not supported."); - } - I40E_WRITE_FLUSH(hw); - - pf->fdir.input_set[pctype] = input_set; - return 0; -} - -static int -i40e_hash_filter_get(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info) -{ - int ret = 0; - - if (!hw || !info) { - PMD_DRV_LOG(ERR, "Invalid pointer"); - return -EFAULT; - } - - switch (info->info_type) { - case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT: - i40e_get_symmetric_hash_enable_per_port(hw, - &(info->info.enable)); - break; - case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG: - ret = i40e_get_hash_filter_global_config(hw, - &(info->info.global_conf)); - break; - default: - PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported", - info->info_type); - ret = -EINVAL; - break; - } - - return ret; -} - -static int -i40e_hash_filter_set(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info) -{ - int ret = 0; - - if (!hw || !info) { - PMD_DRV_LOG(ERR, "Invalid pointer"); - return -EFAULT; - } - - switch (info->info_type) { - case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT: - i40e_set_symmetric_hash_enable_per_port(hw, info->info.enable); - break; - case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG: - ret = i40e_set_hash_filter_global_config(hw, - &(info->info.global_conf)); - break; - case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT: - ret = i40e_hash_filter_inset_select(hw, - &(info->info.input_set_conf)); - break; - - default: - PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported", - info->info_type); - ret = -EINVAL; - break; - } - - return ret; -} - -/* Operations for hash function */ -static int -i40e_hash_filter_ctrl(struct rte_eth_dev *dev, - enum rte_filter_op filter_op, - void *arg) -{ - struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - int ret = 0; - - switch (filter_op) { - case 
RTE_ETH_FILTER_NOP: - break; - case RTE_ETH_FILTER_GET: - ret = i40e_hash_filter_get(hw, - (struct rte_eth_hash_filter_info *)arg); - break; - case RTE_ETH_FILTER_SET: - ret = i40e_hash_filter_set(hw, - (struct rte_eth_hash_filter_info *)arg); - break; - default: - PMD_DRV_LOG(WARNING, "Filter operation (%d) not supported", - filter_op); - ret = -ENOTSUP; - break; - } - - return ret; + pf->hash_input_set[pctype] = input_set; + return 0; } /* Convert ethertype filter structure */ @@ -10169,89 +9855,15 @@ i40e_ethertype_filter_set(struct i40e_pf *pf, return ret; } -/* - * Handle operations for ethertype filter. - */ -static int -i40e_ethertype_filter_handle(struct rte_eth_dev *dev, - enum rte_filter_op filter_op, - void *arg) -{ - struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); - int ret = 0; - - if (filter_op == RTE_ETH_FILTER_NOP) - return ret; - - if (arg == NULL) { - PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u", - filter_op); - return -EINVAL; - } - - switch (filter_op) { - case RTE_ETH_FILTER_ADD: - ret = i40e_ethertype_filter_set(pf, - (struct rte_eth_ethertype_filter *)arg, - TRUE); - break; - case RTE_ETH_FILTER_DELETE: - ret = i40e_ethertype_filter_set(pf, - (struct rte_eth_ethertype_filter *)arg, - FALSE); - break; - default: - PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op); - ret = -ENOSYS; - break; - } - return ret; -} - static int -i40e_dev_filter_ctrl(struct rte_eth_dev *dev, - enum rte_filter_type filter_type, - enum rte_filter_op filter_op, - void *arg) +i40e_dev_flow_ops_get(struct rte_eth_dev *dev, + const struct rte_flow_ops **ops) { - int ret = 0; - if (dev == NULL) return -EINVAL; - switch (filter_type) { - case RTE_ETH_FILTER_NONE: - /* For global configuration */ - ret = i40e_filter_ctrl_global_config(dev, filter_op, arg); - break; - case RTE_ETH_FILTER_HASH: - ret = i40e_hash_filter_ctrl(dev, filter_op, arg); - break; - case RTE_ETH_FILTER_MACVLAN: - ret = i40e_mac_filter_handle(dev, filter_op, arg); - break; - case RTE_ETH_FILTER_ETHERTYPE: - ret = i40e_ethertype_filter_handle(dev, filter_op, arg); - break; - case RTE_ETH_FILTER_TUNNEL: - ret = i40e_tunnel_filter_handle(dev, filter_op, arg); - break; - case RTE_ETH_FILTER_FDIR: - ret = i40e_fdir_ctrl_func(dev, filter_op, arg); - break; - case RTE_ETH_FILTER_GENERIC: - if (filter_op != RTE_ETH_FILTER_GET) - return -EINVAL; - *(const void **)arg = &i40e_flow_ops; - break; - default: - PMD_DRV_LOG(WARNING, "Filter type (%d) not supported", - filter_type); - ret = -EINVAL; - break; - } - - return ret; + *ops = &i40e_flow_ops; + return 0; } /* @@ -10410,6 +10022,7 @@ i40e_get_swr_pm_cfg(struct i40e_hw *hw, uint32_t *value) { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_KX_C) }, { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T) }, { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T4) }, + { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_X722) }, { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_KX_B) }, { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_A) }, @@ -10541,7 +10154,6 @@ i40e_configure_registers(struct i40e_hw *hw) } } -#define I40E_VSI_TSR(_i) (0x00050800 + ((_i) * 4)) #define I40E_VSI_TSR_QINQ_CONFIG 0xc030 #define I40E_VSI_L2TAGSTXVALID(_i) (0x00042800 + ((_i) * 4)) #define I40E_VSI_L2TAGSTXVALID_QINQ 0xab @@ -10588,323 +10200,6 @@ i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi) return 0; } -/** - * i40e_aq_add_mirror_rule - * @hw: pointer to the hardware structure - * @seid: VEB seid to add mirror rule to - * @dst_id: destination vsi seid - * @entries: Buffer which contains the entities 
to be mirrored - * @count: number of entities contained in the buffer - * @rule_id:the rule_id of the rule to be added - * - * Add a mirror rule for a given veb. - * - **/ -static enum i40e_status_code -i40e_aq_add_mirror_rule(struct i40e_hw *hw, - uint16_t seid, uint16_t dst_id, - uint16_t rule_type, uint16_t *entries, - uint16_t count, uint16_t *rule_id) -{ - struct i40e_aq_desc desc; - struct i40e_aqc_add_delete_mirror_rule cmd; - struct i40e_aqc_add_delete_mirror_rule_completion *resp = - (struct i40e_aqc_add_delete_mirror_rule_completion *) - &desc.params.raw; - uint16_t buff_len; - enum i40e_status_code status; - - i40e_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_add_mirror_rule); - memset(&cmd, 0, sizeof(cmd)); - - buff_len = sizeof(uint16_t) * count; - desc.datalen = rte_cpu_to_le_16(buff_len); - if (buff_len > 0) - desc.flags |= rte_cpu_to_le_16( - (uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); - cmd.rule_type = rte_cpu_to_le_16(rule_type << - I40E_AQC_MIRROR_RULE_TYPE_SHIFT); - cmd.num_entries = rte_cpu_to_le_16(count); - cmd.seid = rte_cpu_to_le_16(seid); - cmd.destination = rte_cpu_to_le_16(dst_id); - - rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd)); - status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL); - PMD_DRV_LOG(INFO, - "i40e_aq_add_mirror_rule, aq_status %d, rule_id = %u mirror_rules_used = %u, mirror_rules_free = %u,", - hw->aq.asq_last_status, resp->rule_id, - resp->mirror_rules_used, resp->mirror_rules_free); - *rule_id = rte_le_to_cpu_16(resp->rule_id); - - return status; -} - -/** - * i40e_aq_del_mirror_rule - * @hw: pointer to the hardware structure - * @seid: VEB seid to add mirror rule to - * @entries: Buffer which contains the entities to be mirrored - * @count: number of entities contained in the buffer - * @rule_id:the rule_id of the rule to be delete - * - * Delete a mirror rule for a given veb. - * - **/ -static enum i40e_status_code -i40e_aq_del_mirror_rule(struct i40e_hw *hw, - uint16_t seid, uint16_t rule_type, uint16_t *entries, - uint16_t count, uint16_t rule_id) -{ - struct i40e_aq_desc desc; - struct i40e_aqc_add_delete_mirror_rule cmd; - uint16_t buff_len = 0; - enum i40e_status_code status; - void *buff = NULL; - - i40e_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_delete_mirror_rule); - memset(&cmd, 0, sizeof(cmd)); - if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) { - desc.flags |= rte_cpu_to_le_16((uint16_t)(I40E_AQ_FLAG_BUF | - I40E_AQ_FLAG_RD)); - cmd.num_entries = count; - buff_len = sizeof(uint16_t) * count; - desc.datalen = rte_cpu_to_le_16(buff_len); - buff = (void *)entries; - } else - /* rule id is filled in destination field for deleting mirror rule */ - cmd.destination = rte_cpu_to_le_16(rule_id); - - cmd.rule_type = rte_cpu_to_le_16(rule_type << - I40E_AQC_MIRROR_RULE_TYPE_SHIFT); - cmd.seid = rte_cpu_to_le_16(seid); - - rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd)); - status = i40e_asq_send_command(hw, &desc, buff, buff_len, NULL); - - return status; -} - -/** - * i40e_mirror_rule_set - * @dev: pointer to the hardware structure - * @mirror_conf: mirror rule info - * @sw_id: mirror rule's sw_id - * @on: enable/disable - * - * set a mirror rule. 
- *
- **/
-static int
-i40e_mirror_rule_set(struct rte_eth_dev *dev,
-		     struct rte_eth_mirror_conf *mirror_conf,
-		     uint8_t sw_id, uint8_t on)
-{
-	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct i40e_mirror_rule *it, *mirr_rule = NULL;
-	struct i40e_mirror_rule *parent = NULL;
-	uint16_t seid, dst_seid, rule_id;
-	uint16_t i, j = 0;
-	int ret;
-
-	PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);
-
-	if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
-		PMD_DRV_LOG(ERR,
-			"mirror rule can not be configured without veb or vfs.");
-		return -ENOSYS;
-	}
-	if (pf->nb_mirror_rule > I40E_MAX_MIRROR_RULES) {
-		PMD_DRV_LOG(ERR, "mirror table is full.");
-		return -ENOSPC;
-	}
-	if (mirror_conf->dst_pool > pf->vf_num) {
-		PMD_DRV_LOG(ERR, "invalid destination pool %u.",
-			mirror_conf->dst_pool);
-		return -EINVAL;
-	}
-
-	seid = pf->main_vsi->veb->seid;
-
-	TAILQ_FOREACH(it, &pf->mirror_list, rules) {
-		if (sw_id <= it->index) {
-			mirr_rule = it;
-			break;
-		}
-		parent = it;
-	}
-	if (mirr_rule && sw_id == mirr_rule->index) {
-		if (on) {
-			PMD_DRV_LOG(ERR, "mirror rule exists.");
-			return -EEXIST;
-		} else {
-			ret = i40e_aq_del_mirror_rule(hw, seid,
-				mirr_rule->rule_type,
-				mirr_rule->entries,
-				mirr_rule->num_entries, mirr_rule->id);
-			if (ret < 0) {
-				PMD_DRV_LOG(ERR,
-					"failed to remove mirror rule: ret = %d, aq_err = %d.",
-					ret, hw->aq.asq_last_status);
-				return -ENOSYS;
-			}
-			TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
-			rte_free(mirr_rule);
-			pf->nb_mirror_rule--;
-			return 0;
-		}
-	} else if (!on) {
-		PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
-		return -ENOENT;
-	}
-
-	mirr_rule = rte_zmalloc("i40e_mirror_rule",
-				sizeof(struct i40e_mirror_rule) , 0);
-	if (!mirr_rule) {
-		PMD_DRV_LOG(ERR, "failed to allocate memory");
-		return I40E_ERR_NO_MEMORY;
-	}
-	switch (mirror_conf->rule_type) {
-	case ETH_MIRROR_VLAN:
-		for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) {
-			if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
-				mirr_rule->entries[j] =
-					mirror_conf->vlan.vlan_id[i];
-				j++;
-			}
-		}
-		if (j == 0) {
-			PMD_DRV_LOG(ERR, "vlan is not specified.");
-			rte_free(mirr_rule);
-			return -EINVAL;
-		}
-		mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
-		break;
-	case ETH_MIRROR_VIRTUAL_POOL_UP:
-	case ETH_MIRROR_VIRTUAL_POOL_DOWN:
-		/* check if the specified pool bit is out of range */
-		if (mirror_conf->pool_mask > (uint64_t)(1ULL << (pf->vf_num + 1))) {
-			PMD_DRV_LOG(ERR, "pool mask is out of range.");
-			rte_free(mirr_rule);
-			return -EINVAL;
-		}
-		for (i = 0, j = 0; i < pf->vf_num; i++) {
-			if (mirror_conf->pool_mask & (1ULL << i)) {
-				mirr_rule->entries[j] = pf->vfs[i].vsi->seid;
-				j++;
-			}
-		}
-		if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) {
-			/* add pf vsi to entries */
-			mirr_rule->entries[j] = pf->main_vsi_seid;
-			j++;
-		}
-		if (j == 0) {
-			PMD_DRV_LOG(ERR, "pool is not specified.");
-			rte_free(mirr_rule);
-			return -EINVAL;
-		}
-		/* egress and ingress in aq commands means from switch but not port */
-		mirr_rule->rule_type =
-			(mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ?
-				I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS :
-				I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
-		break;
-	case ETH_MIRROR_UPLINK_PORT:
-		/* egress and ingress in aq commands means from switch but not port*/
-		mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
-		break;
-	case ETH_MIRROR_DOWNLINK_PORT:
-		mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
-		break;
-	default:
-		PMD_DRV_LOG(ERR, "unsupported mirror type %d.",
-			mirror_conf->rule_type);
-		rte_free(mirr_rule);
-		return -EINVAL;
-	}
-
-	/* If the dst_pool is equal to vf_num, consider it as PF */
-	if (mirror_conf->dst_pool == pf->vf_num)
-		dst_seid = pf->main_vsi_seid;
-	else
-		dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid;
-
-	ret = i40e_aq_add_mirror_rule(hw, seid, dst_seid,
-				      mirr_rule->rule_type, mirr_rule->entries,
-				      j, &rule_id);
-	if (ret < 0) {
-		PMD_DRV_LOG(ERR,
-			"failed to add mirror rule: ret = %d, aq_err = %d.",
-			ret, hw->aq.asq_last_status);
-		rte_free(mirr_rule);
-		return -ENOSYS;
-	}
-
-	mirr_rule->index = sw_id;
-	mirr_rule->num_entries = j;
-	mirr_rule->id = rule_id;
-	mirr_rule->dst_vsi_seid = dst_seid;
-
-	if (parent)
-		TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules);
-	else
-		TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules);
-
-	pf->nb_mirror_rule++;
-	return 0;
-}
-
-/**
- * i40e_mirror_rule_reset
- * @dev: pointer to the device
- * @sw_id: mirror rule's sw_id
- *
- * reset a mirror rule.
- *
- **/
-static int
-i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
-{
-	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct i40e_mirror_rule *it, *mirr_rule = NULL;
-	uint16_t seid;
-	int ret;
-
-	PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_reset: sw_id = %d.", sw_id);
-
-	seid = pf->main_vsi->veb->seid;
-
-	TAILQ_FOREACH(it, &pf->mirror_list, rules) {
-		if (sw_id == it->index) {
-			mirr_rule = it;
-			break;
-		}
-	}
-	if (mirr_rule) {
-		ret = i40e_aq_del_mirror_rule(hw, seid,
-				mirr_rule->rule_type,
-				mirr_rule->entries,
-				mirr_rule->num_entries, mirr_rule->id);
-		if (ret < 0) {
-			PMD_DRV_LOG(ERR,
-				"failed to remove mirror rule: status = %d, aq_err = %d.",
-				ret, hw->aq.asq_last_status);
-			return -ENOSYS;
-		}
-		TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
-		rte_free(mirr_rule);
-		pf->nb_mirror_rule--;
-	} else {
-		PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
-		return -ENOENT;
-	}
-	return 0;
-}
-
 static uint64_t
 i40e_read_systime_cyclecounter(struct rte_eth_dev *dev)
 {
@@ -11575,13 +10870,17 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
 	 * LLDP MIB change event.
 	 */
 	if (sw_dcb == TRUE) {
-		if (i40e_need_stop_lldp(dev)) {
-			ret = i40e_aq_stop_lldp(hw, TRUE, NULL);
-			if (ret != I40E_SUCCESS)
-				PMD_INIT_LOG(DEBUG, "Failed to stop lldp");
-		}
+		/* Stopping lldp is necessary for DPDK, but it will cause
+		 * DCB init failed. For i40e_init_dcb(), the prerequisite
+		 * for successful initialization of DCB is that LLDP is
+		 * enabled. So it is needed to start lldp before DCB init
+		 * and stop it after initialization.
+		 */
+		ret = i40e_aq_start_lldp(hw, true, NULL);
+		if (ret != I40E_SUCCESS)
+			PMD_INIT_LOG(DEBUG, "Failed to start lldp");
 
-		ret = i40e_init_dcb(hw);
+		ret = i40e_init_dcb(hw, true);
 		/* If lldp agent is stopped, the return value from
 		 * i40e_init_dcb we expect is failure with I40E_AQ_RC_EPERM
 		 * adminq status. Otherwise, it should return success.
@@ -11624,12 +10923,18 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
 				ret, hw->aq.asq_last_status);
 			return -ENOTSUP;
 		}
+
+		if (i40e_need_stop_lldp(dev)) {
+			ret = i40e_aq_stop_lldp(hw, true, true, NULL);
+			if (ret != I40E_SUCCESS)
+				PMD_INIT_LOG(DEBUG, "Failed to stop lldp");
+		}
 	} else {
-		ret = i40e_aq_start_lldp(hw, NULL);
+		ret = i40e_aq_start_lldp(hw, true, NULL);
 		if (ret != I40E_SUCCESS)
 			PMD_INIT_LOG(DEBUG, "Failed to start lldp");
 
-		ret = i40e_init_dcb(hw);
+		ret = i40e_init_dcb(hw, true);
 		if (!ret) {
 			if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
 				PMD_INIT_LOG(ERR,
@@ -12017,9 +11322,6 @@ static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
 	uint32_t value = 0;
 	uint32_t i;
 
-	if (!info || !info->length || !info->data)
-		return -EINVAL;
-
 	if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP)
 		is_sfp = true;
 
@@ -12043,7 +11345,7 @@ static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
 		}
 		status = i40e_aq_get_phy_register(hw,
 				I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
-				addr, offset, 1, &value, NULL);
+				addr, 1, offset, &value, NULL);
 		if (status)
 			return -EIO;
 		data[i] = (uint8_t)value;
@@ -12120,7 +11422,7 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 		return -EBUSY;
 	}
 
-	if (frame_size > RTE_ETHER_MAX_LEN)
+	if (frame_size > I40E_ETH_MAX_LEN)
 		dev_data->dev_conf.rxmode.offloads |=
 			DEV_RX_OFFLOAD_JUMBO_FRAME;
 	else
@@ -12220,23 +11522,13 @@ i40e_tunnel_filter_restore(struct i40e_pf *pf)
 	}
 }
 
-/* Restore rss filter */
-static inline void
-i40e_rss_filter_restore(struct i40e_pf *pf)
-{
-	struct i40e_rte_flow_rss_conf *conf =
-					&pf->rss_info;
-	if (conf->conf.queue_num)
-		i40e_config_rss_filter(pf, conf, TRUE);
-}
-
 static void
 i40e_filter_restore(struct i40e_pf *pf)
 {
 	i40e_ethertype_filter_restore(pf);
 	i40e_tunnel_filter_restore(pf);
 	i40e_fdir_filter_restore(pf);
-	i40e_rss_filter_restore(pf);
+	(void)i40e_hash_filter_restore(pf);
 }
 
 bool
@@ -12334,6 +11626,7 @@ i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
 			}
 		}
 		name[strlen(name) - 1] = '\0';
+		PMD_DRV_LOG(INFO, "name = %s\n", name);
 		if (!strcmp(name, "GTPC"))
 			new_pctype =
 				i40e_find_customized_pctype(pf,
@@ -12350,6 +11643,38 @@ i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
 			new_pctype =
 				i40e_find_customized_pctype(pf,
 						I40E_CUSTOMIZED_GTPU);
+		else if (!strcmp(name, "IPV4_L2TPV3"))
+			new_pctype =
+				i40e_find_customized_pctype(pf,
+						I40E_CUSTOMIZED_IPV4_L2TPV3);
+		else if (!strcmp(name, "IPV6_L2TPV3"))
+			new_pctype =
+				i40e_find_customized_pctype(pf,
+						I40E_CUSTOMIZED_IPV6_L2TPV3);
+		else if (!strcmp(name, "IPV4_ESP"))
+			new_pctype =
+				i40e_find_customized_pctype(pf,
+						I40E_CUSTOMIZED_ESP_IPV4);
+		else if (!strcmp(name, "IPV6_ESP"))
+			new_pctype =
+				i40e_find_customized_pctype(pf,
+						I40E_CUSTOMIZED_ESP_IPV6);
+		else if (!strcmp(name, "IPV4_UDP_ESP"))
+			new_pctype =
+				i40e_find_customized_pctype(pf,
+						I40E_CUSTOMIZED_ESP_IPV4_UDP);
+		else if (!strcmp(name, "IPV6_UDP_ESP"))
+			new_pctype =
+				i40e_find_customized_pctype(pf,
+						I40E_CUSTOMIZED_ESP_IPV6_UDP);
+		else if (!strcmp(name, "IPV4_AH"))
+			new_pctype =
+				i40e_find_customized_pctype(pf,
+						I40E_CUSTOMIZED_AH_IPV4);
+		else if (!strcmp(name, "IPV6_AH"))
+			new_pctype =
+				i40e_find_customized_pctype(pf,
+						I40E_CUSTOMIZED_AH_IPV6);
 		if (new_pctype) {
 			if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
 				new_pctype->pctype = pctype_value;
@@ -12445,6 +11770,7 @@ i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
 			continue;
 		memset(name, 0, sizeof(name));
 		strcpy(name, proto[n].name);
+		PMD_DRV_LOG(INFO, "name = %s\n", name);
%s\n", name); if (!strncasecmp(name, "PPPOE", 5)) ptype_mapping[i].sw_ptype |= RTE_PTYPE_L2_ETHER_PPPOE; @@ -12538,12 +11864,17 @@ i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg, ptype_mapping[i].sw_ptype |= RTE_PTYPE_TUNNEL_GTPU; in_tunnel = true; + } else if (!strncasecmp(name, "ESP", 3)) { + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_TUNNEL_ESP; + in_tunnel = true; } else if (!strncasecmp(name, "GRENAT", 6)) { ptype_mapping[i].sw_ptype |= RTE_PTYPE_TUNNEL_GRENAT; in_tunnel = true; } else if (!strncasecmp(name, "L2TPV2CTL", 9) || - !strncasecmp(name, "L2TPV2", 6)) { + !strncasecmp(name, "L2TPV2", 6) || + !strncasecmp(name, "L2TPV3", 6)) { ptype_mapping[i].sw_ptype |= RTE_PTYPE_TUNNEL_L2TP; in_tunnel = true; @@ -12557,7 +11888,7 @@ i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg, ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping, ptype_num, 0); if (ret) - PMD_DRV_LOG(ERR, "Failed to update mapping table."); + PMD_DRV_LOG(ERR, "Failed to update ptype mapping table."); rte_free(ptype_mapping); rte_free(ptype); @@ -12622,6 +11953,17 @@ i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg, } } + /* Check if ESP is supported. */ + for (i = 0; i < proto_num; i++) { + if (!strncmp(proto[i].name, "ESP", 3)) { + if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) + pf->esp_support = true; + else + pf->esp_support = false; + break; + } + } + /* Update customized pctype info */ ret = i40e_update_customized_pctype(dev, pkg, pkg_size, proto_num, proto, op); @@ -12687,7 +12029,7 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf) struct i40e_aqc_replace_cloud_filters_cmd filter_replace; struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf; struct i40e_hw *hw = I40E_PF_TO_HW(pf); - struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev; + struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id]; if (pf->support_multi_driver) { PMD_DRV_LOG(ERR, "Replace cloud filter is not supported."); @@ -12764,153 +12106,17 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf) return ret; } -int -i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out, - const struct rte_flow_action_rss *in) -{ - if (in->key_len > RTE_DIM(out->key) || - in->queue_num > RTE_DIM(out->queue)) - return -EINVAL; - if (!in->key && in->key_len) - return -EINVAL; - out->conf = (struct rte_flow_action_rss){ - .func = in->func, - .level = in->level, - .types = in->types, - .key_len = in->key_len, - .queue_num = in->queue_num, - .queue = memcpy(out->queue, in->queue, - sizeof(*in->queue) * in->queue_num), - }; - if (in->key) - out->conf.key = memcpy(out->key, in->key, in->key_len); - return 0; -} - -int -i40e_action_rss_same(const struct rte_flow_action_rss *comp, - const struct rte_flow_action_rss *with) -{ - return (comp->func == with->func && - comp->level == with->level && - comp->types == with->types && - comp->key_len == with->key_len && - comp->queue_num == with->queue_num && - !memcmp(comp->key, with->key, with->key_len) && - !memcmp(comp->queue, with->queue, - sizeof(*with->queue) * with->queue_num)); -} - -int -i40e_config_rss_filter(struct i40e_pf *pf, - struct i40e_rte_flow_rss_conf *conf, bool add) -{ - struct i40e_hw *hw = I40E_PF_TO_HW(pf); - uint32_t i, lut = 0; - uint16_t j, num; - struct rte_eth_rss_conf rss_conf = { - .rss_key = conf->conf.key_len ? 
-			(void *)(uintptr_t)conf->conf.key : NULL,
-		.rss_key_len = conf->conf.key_len,
-		.rss_hf = conf->conf.types,
-	};
-	struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
-
-	if (!add) {
-		if (i40e_action_rss_same(&rss_info->conf, &conf->conf)) {
-			i40e_pf_disable_rss(pf);
-			memset(rss_info, 0,
-				sizeof(struct i40e_rte_flow_rss_conf));
-			return 0;
-		}
-		return -EINVAL;
-	}
-
-	/* If both VMDQ and RSS enabled, not all of PF queues are configured.
-	 * It's necessary to calculate the actual PF queues that are configured.
-	 */
-	if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
-		num = i40e_pf_calc_configured_queues_num(pf);
-	else
-		num = pf->dev_data->nb_rx_queues;
-
-	num = RTE_MIN(num, conf->conf.queue_num);
-	PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are configured",
-			num);
-
-	if (num == 0) {
-		PMD_DRV_LOG(ERR, "No PF queues are configured to enable RSS");
-		return -ENOTSUP;
-	}
-
-	/* Fill in redirection table */
-	for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
-		if (j == num)
-			j = 0;
-		lut = (lut << 8) | (conf->conf.queue[j] & ((0x1 <<
-			hw->func_caps.rss_table_entry_width) - 1));
-		if ((i & 3) == 3)
-			I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
-	}
-
-	if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
-		i40e_pf_disable_rss(pf);
-		return 0;
-	}
-	if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
-		(I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
-		/* Random default keys */
-		static uint32_t rss_key_default[] = {0x6b793944,
-			0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
-			0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
-			0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
-
-		rss_conf.rss_key = (uint8_t *)rss_key_default;
-		rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
-				sizeof(uint32_t);
-		PMD_DRV_LOG(INFO,
-			"No valid RSS key config for i40e, using default\n");
-	}
-
-	i40e_hw_rss_hash_set(pf, &rss_conf);
-
-	if (i40e_rss_conf_init(rss_info, &conf->conf))
-		return -EINVAL;
-
-	return 0;
-}
-
-RTE_INIT(i40e_init_log)
-{
-	i40e_logtype_init = rte_log_register("pmd.net.i40e.init");
-	if (i40e_logtype_init >= 0)
-		rte_log_set_level(i40e_logtype_init, RTE_LOG_NOTICE);
-	i40e_logtype_driver = rte_log_register("pmd.net.i40e.driver");
-	if (i40e_logtype_driver >= 0)
-		rte_log_set_level(i40e_logtype_driver, RTE_LOG_NOTICE);
-
-#ifdef RTE_LIBRTE_I40E_DEBUG_RX
-	i40e_logtype_rx = rte_log_register("pmd.net.i40e.rx");
-	if (i40e_logtype_rx >= 0)
-		rte_log_set_level(i40e_logtype_rx, RTE_LOG_DEBUG);
+RTE_LOG_REGISTER_SUFFIX(i40e_logtype_init, init, NOTICE);
+RTE_LOG_REGISTER_SUFFIX(i40e_logtype_driver, driver, NOTICE);
+#ifdef RTE_ETHDEV_DEBUG_RX
+RTE_LOG_REGISTER_SUFFIX(i40e_logtype_rx, rx, DEBUG);
 #endif
-
-#ifdef RTE_LIBRTE_I40E_DEBUG_TX
-	i40e_logtype_tx = rte_log_register("pmd.net.i40e.tx");
-	if (i40e_logtype_tx >= 0)
-		rte_log_set_level(i40e_logtype_tx, RTE_LOG_DEBUG);
-#endif
-
-#ifdef RTE_LIBRTE_I40E_DEBUG_TX_FREE
-	i40e_logtype_tx_free = rte_log_register("pmd.net.i40e.tx_free");
-	if (i40e_logtype_tx_free >= 0)
-		rte_log_set_level(i40e_logtype_tx_free, RTE_LOG_DEBUG);
+#ifdef RTE_ETHDEV_DEBUG_TX
+RTE_LOG_REGISTER_SUFFIX(i40e_logtype_tx, tx, DEBUG);
 #endif
-}
 
 RTE_PMD_REGISTER_PARAM_STRING(net_i40e,
 			      ETH_I40E_FLOATING_VEB_ARG "=1"
 			      ETH_I40E_FLOATING_VEB_LIST_ARG "=<string>"
 			      ETH_I40E_QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16"
-			      ETH_I40E_SUPPORT_MULTI_DRIVER "=1"
-			      ETH_I40E_USE_LATEST_VEC "=0|1");
+			      ETH_I40E_SUPPORT_MULTI_DRIVER "=1");