diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c index a8e8f6b865..7ffd7e7384 100644 --- a/drivers/net/i40e/i40e_ethdev.c +++ b/drivers/net/i40e/i40e_ethdev.c @@ -17,8 +17,8 @@ #include #include #include -#include -#include +#include +#include #include #include #include @@ -27,6 +27,7 @@ #include #include #include +#include #include "i40e_logs.h" #include "base/i40e_prototype.h" @@ -39,12 +40,12 @@ #include "i40e_pf.h" #include "i40e_regs.h" #include "rte_pmd_i40e.h" +#include "i40e_hash.h" #define ETH_I40E_FLOATING_VEB_ARG "enable_floating_veb" #define ETH_I40E_FLOATING_VEB_LIST_ARG "floating_veb_list" #define ETH_I40E_SUPPORT_MULTI_DRIVER "support-multi-driver" #define ETH_I40E_QUEUE_NUM_PER_VF_ARG "queue-num-per-vf" -#define ETH_I40E_USE_LATEST_VEC "use-latest-supported-vec" #define ETH_I40E_VF_MSG_CFG "vf_msg_cfg" #define I40E_CLEAR_PXE_WAIT_MS 200 @@ -202,12 +203,12 @@ #define I40E_TRANSLATE_INSET 0 #define I40E_TRANSLATE_REG 1 -#define I40E_INSET_IPV4_TOS_MASK 0x0009FF00UL -#define I40E_INSET_IPv4_TTL_MASK 0x000D00FFUL -#define I40E_INSET_IPV4_PROTO_MASK 0x000DFF00UL -#define I40E_INSET_IPV6_TC_MASK 0x0009F00FUL -#define I40E_INSET_IPV6_HOP_LIMIT_MASK 0x000CFF00UL -#define I40E_INSET_IPV6_NEXT_HDR_MASK 0x000C00FFUL +#define I40E_INSET_IPV4_TOS_MASK 0x0000FF00UL +#define I40E_INSET_IPV4_TTL_MASK 0x000000FFUL +#define I40E_INSET_IPV4_PROTO_MASK 0x0000FF00UL +#define I40E_INSET_IPV6_TC_MASK 0x0000F00FUL +#define I40E_INSET_IPV6_HOP_LIMIT_MASK 0x0000FF00UL +#define I40E_INSET_IPV6_NEXT_HDR_MASK 0x000000FFUL /* PCI offset for querying capability */ #define PCI_DEV_CAP_REG 0xA4 @@ -220,11 +221,30 @@ /* Bit mask of Extended Tag enable/disable */ #define PCI_DEV_CTRL_EXT_TAG_MASK (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT) +#define I40E_GLQF_PIT_IPV4_START 2 +#define I40E_GLQF_PIT_IPV4_COUNT 2 +#define I40E_GLQF_PIT_IPV6_START 4 +#define I40E_GLQF_PIT_IPV6_COUNT 2 + +#define I40E_GLQF_PIT_SOURCE_OFF_GET(a) \ + (((a) & I40E_GLQF_PIT_SOURCE_OFF_MASK) >> \ + I40E_GLQF_PIT_SOURCE_OFF_SHIFT) + +#define I40E_GLQF_PIT_DEST_OFF_GET(a) \ + (((a) & I40E_GLQF_PIT_DEST_OFF_MASK) >> \ + I40E_GLQF_PIT_DEST_OFF_SHIFT) + +#define I40E_GLQF_PIT_FSIZE_GET(a) (((a) & I40E_GLQF_PIT_FSIZE_MASK) >> \ + I40E_GLQF_PIT_FSIZE_SHIFT) + +#define I40E_GLQF_PIT_BUILD(off, mask) (((off) << 16) | (mask)) +#define I40E_FDIR_FIELD_OFFSET(a) ((a) >> 1) + static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev, void *init_params); static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev); static int i40e_dev_configure(struct rte_eth_dev *dev); static int i40e_dev_start(struct rte_eth_dev *dev); -static void i40e_dev_stop(struct rte_eth_dev *dev); +static int i40e_dev_stop(struct rte_eth_dev *dev); static int i40e_dev_close(struct rte_eth_dev *dev); static int i40e_dev_reset(struct rte_eth_dev *dev); static int i40e_dev_promiscuous_enable(struct rte_eth_dev *dev); @@ -319,29 +339,14 @@ static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, struct rte_eth_udp_tunnel *udp_tunnel); static void i40e_filter_input_set_init(struct i40e_pf *pf); -static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev, - enum rte_filter_op filter_op, - void *arg); -static 
int i40e_dev_filter_ctrl(struct rte_eth_dev *dev, - enum rte_filter_type filter_type, - enum rte_filter_op filter_op, - void *arg); +static int i40e_dev_flow_ops_get(struct rte_eth_dev *dev, + const struct rte_flow_ops **ops); static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev, struct rte_eth_dcb_info *dcb_info); static int i40e_dev_sync_phy_type(struct i40e_hw *hw); static void i40e_configure_registers(struct i40e_hw *hw); static void i40e_hw_init(struct rte_eth_dev *dev); static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi); -static enum i40e_status_code i40e_aq_del_mirror_rule(struct i40e_hw *hw, - uint16_t seid, - uint16_t rule_type, - uint16_t *entries, - uint16_t count, - uint16_t rule_id); -static int i40e_mirror_rule_set(struct rte_eth_dev *dev, - struct rte_eth_mirror_conf *mirror_conf, - uint8_t sw_id, uint8_t on); -static int i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id); static int i40e_timesync_enable(struct rte_eth_dev *dev); static int i40e_timesync_disable(struct rte_eth_dev *dev); @@ -381,6 +386,7 @@ static int i40e_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr); static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); +static void i40e_set_mac_max_frame(struct rte_eth_dev *dev, uint16_t size); static int i40e_ethertype_filter_convert( const struct rte_eth_ethertype_filter *input, @@ -399,14 +405,12 @@ static void i40e_ethertype_filter_restore(struct i40e_pf *pf); static void i40e_tunnel_filter_restore(struct i40e_pf *pf); static void i40e_filter_restore(struct i40e_pf *pf); static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev); -static int i40e_pf_config_rss(struct i40e_pf *pf); static const char *const valid_keys[] = { ETH_I40E_FLOATING_VEB_ARG, ETH_I40E_FLOATING_VEB_LIST_ARG, ETH_I40E_SUPPORT_MULTI_DRIVER, ETH_I40E_QUEUE_NUM_PER_VF_ARG, - ETH_I40E_USE_LATEST_VEC, ETH_I40E_VF_MSG_CFG, NULL}; @@ -489,13 +493,11 @@ static const struct eth_dev_ops i40e_eth_dev_ops = { .rss_hash_conf_get = i40e_dev_rss_hash_conf_get, .udp_tunnel_port_add = i40e_dev_udp_tunnel_port_add, .udp_tunnel_port_del = i40e_dev_udp_tunnel_port_del, - .filter_ctrl = i40e_dev_filter_ctrl, + .flow_ops_get = i40e_dev_flow_ops_get, .rxq_info_get = i40e_rxq_info_get, .txq_info_get = i40e_txq_info_get, .rx_burst_mode_get = i40e_rx_burst_mode_get, .tx_burst_mode_get = i40e_tx_burst_mode_get, - .mirror_rule_set = i40e_mirror_rule_set, - .mirror_rule_reset = i40e_mirror_rule_reset, .timesync_enable = i40e_timesync_enable, .timesync_disable = i40e_timesync_disable, .timesync_read_rx_timestamp = i40e_timesync_read_rx_timestamp, @@ -513,12 +515,13 @@ static const struct eth_dev_ops i40e_eth_dev_ops = { .mtu_set = i40e_dev_mtu_set, .tm_ops_get = i40e_tm_ops_get, .tx_done_cleanup = i40e_tx_done_cleanup, + .get_monitor_addr = i40e_get_monitor_addr, }; /* store statistics names and its offset in stats structure */ struct rte_i40e_xstats_name_off { char name[RTE_ETH_XSTATS_NAME_SIZE]; - unsigned offset; + int offset; }; static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = { @@ -528,6 +531,8 @@ static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = { {"rx_dropped_packets", offsetof(struct i40e_eth_stats, rx_discards)}, {"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats, rx_unknown_protocol)}, + {"rx_size_error_packets", offsetof(struct i40e_pf, rx_err1) - + offsetof(struct i40e_pf, stats)}, {"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)}, 
{"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)}, {"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)}, @@ -643,6 +648,13 @@ eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, return retval; } + if (eth_da.nb_representor_ports > 0 && + eth_da.type != RTE_ETH_REPRESENTOR_VF) { + PMD_DRV_LOG(ERR, "unsupported representor type: %s\n", + pci_dev->device.devargs->args); + return -ENOTSUP; + } + retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name, sizeof(struct i40e_adapter), eth_dev_pci_specific_init, pci_dev, @@ -711,10 +723,11 @@ i40e_write_global_rx_ctl(struct i40e_hw *hw, uint32_t reg_addr, uint32_t reg_val) { uint32_t ori_reg_val; - struct rte_eth_dev *dev; + struct rte_eth_dev_data *dev_data = + ((struct i40e_adapter *)hw->back)->pf.dev_data; + struct rte_eth_dev *dev = &rte_eth_devices[dev_data->port_id]; ori_reg_val = i40e_read_rx_ctl(hw, reg_addr); - dev = ((struct i40e_adapter *)hw->back)->eth_dev; i40e_write_rx_ctl(hw, reg_addr, reg_val); if (ori_reg_val != reg_val) PMD_DRV_LOG(WARNING, @@ -766,6 +779,21 @@ static inline void i40e_config_automask(struct i40e_pf *pf) I40E_WRITE_REG(hw, I40E_GLINT_CTL, val); } +static inline void i40e_clear_automask(struct i40e_pf *pf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + uint32_t val; + + val = I40E_READ_REG(hw, I40E_GLINT_CTL); + val &= ~(I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK | + I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK); + + if (!pf->support_multi_driver) + val &= ~I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK; + + I40E_WRITE_REG(hw, I40E_GLINT_CTL, val); +} + #define I40E_FLOW_CONTROL_ETHERTYPE 0x8808 /* @@ -818,6 +846,8 @@ floating_veb_list_handler(__rte_unused const char *key, idx = strtoul(floating_veb_value, &end, 10); if (errno || end == NULL) return -1; + if (idx < 0) + return -1; while (isblank(*end)) end++; if (*end == '-') { @@ -1049,6 +1079,7 @@ i40e_init_fdir_filter_list(struct rte_eth_dev *dev) char fdir_hash_name[RTE_HASH_NAMESIZE]; uint32_t alloc = hw->func_caps.fd_filters_guaranteed; uint32_t best = hw->func_caps.fd_filters_best_effort; + enum i40e_filter_pctype pctype; struct rte_bitmap *bmp = NULL; uint32_t bmp_size; void *mem = NULL; @@ -1097,6 +1128,10 @@ i40e_init_fdir_filter_list(struct rte_eth_dev *dev) goto err_fdir_filter_array_alloc; } + for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP; + pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) + pf->fdir.flow_count[pctype] = 0; + fdir_info->fdir_space_size = alloc + best; fdir_info->fdir_actual_cnt = 0; fdir_info->fdir_guarantee_total_space = alloc; @@ -1283,7 +1318,9 @@ i40e_aq_debug_write_global_register(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details) { uint64_t ori_reg_val; - struct rte_eth_dev *dev; + struct rte_eth_dev_data *dev_data = + ((struct i40e_adapter *)hw->back)->pf.dev_data; + struct rte_eth_dev *dev = &rte_eth_devices[dev_data->port_id]; int ret; ret = i40e_aq_debug_read_register(hw, reg_addr, &ori_reg_val, NULL); @@ -1293,7 +1330,6 @@ i40e_aq_debug_write_global_register(struct i40e_hw *hw, reg_addr); return -EIO; } - dev = ((struct i40e_adapter *)hw->back)->eth_dev; if (ori_reg_val != reg_val) PMD_DRV_LOG(WARNING, @@ -1304,62 +1340,6 @@ i40e_aq_debug_write_global_register(struct i40e_hw *hw, return i40e_aq_debug_write_register(hw, reg_addr, reg_val, cmd_details); } -static int -i40e_parse_latest_vec_handler(__rte_unused const char *key, - const char *value, - void *opaque) -{ - struct i40e_adapter *ad = opaque; - int use_latest_vec; - - use_latest_vec = atoi(value); - - if 
(use_latest_vec != 0 && use_latest_vec != 1) - PMD_DRV_LOG(WARNING, "Value should be 0 or 1, set it as 1!"); - - ad->use_latest_vec = (uint8_t)use_latest_vec; - - return 0; -} - -static int -i40e_use_latest_vec(struct rte_eth_dev *dev) -{ - struct i40e_adapter *ad = - I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); - struct rte_kvargs *kvlist; - int kvargs_count; - - ad->use_latest_vec = false; - - if (!dev->device->devargs) - return 0; - - kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys); - if (!kvlist) - return -EINVAL; - - kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_USE_LATEST_VEC); - if (!kvargs_count) { - rte_kvargs_free(kvlist); - return 0; - } - - if (kvargs_count > 1) - PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only " - "the first invalid or last valid one is used !", - ETH_I40E_USE_LATEST_VEC); - - if (rte_kvargs_process(kvlist, ETH_I40E_USE_LATEST_VEC, - i40e_parse_latest_vec_handler, ad) < 0) { - rte_kvargs_free(kvlist); - return -EINVAL; - } - - rte_kvargs_free(kvlist); - return 0; -} - static int read_vf_msg_config(__rte_unused const char *key, const char *value, @@ -1445,7 +1425,6 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) dev->dev_ops = &i40e_eth_dev_ops; dev->rx_queue_count = i40e_dev_rx_queue_count; - dev->rx_descriptor_done = i40e_dev_rx_descriptor_done; dev->rx_descriptor_status = i40e_dev_rx_descriptor_status; dev->tx_descriptor_status = i40e_dev_tx_descriptor_status; dev->rx_pkt_burst = i40e_recv_pkts; @@ -1462,12 +1441,11 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) } i40e_set_default_ptype_table(dev); pci_dev = RTE_ETH_DEV_TO_PCI(dev); - intr_handle = &pci_dev->intr_handle; + intr_handle = pci_dev->intr_handle; rte_eth_copy_pci_info(dev, pci_dev); pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); - pf->adapter->eth_dev = dev; pf->dev_data = dev->data; hw->back = I40E_PF_TO_ADAPTER(pf); @@ -1509,8 +1487,6 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) i40e_parse_vf_msg_config(dev, &pf->vf_msg_cfg); /* Check if need to support multi-driver */ i40e_support_multi_driver(dev); - /* Check if users want the latest supported vec path */ - i40e_use_latest_vec(dev); /* Make sure all is clean before doing PF reset */ i40e_clear_hw(hw); @@ -1536,8 +1512,9 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret); return -EIO; } - /* Firmware of SFP x722 does not support adminq option */ - if (hw->device_id == I40E_DEV_ID_SFP_X722) + /* Firmware of SFP x722 does not support 802.1ad frames ability */ + if (hw->device_id == I40E_DEV_ID_SFP_X722 || + hw->device_id == I40E_DEV_ID_SFP_I_X722) hw->flags &= ~I40E_HW_FLAG_802_1AD_CAPABLE; PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x", @@ -1653,7 +1630,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) /* Set the global registers with default ether type value */ if (!pf->support_multi_driver) { - ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER, + ret = i40e_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_OUTER, RTE_ETHER_TYPE_VLAN); if (ret != I40E_SUCCESS) { PMD_INIT_LOG(ERR, @@ -1733,14 +1710,6 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) */ i40e_add_tx_flow_control_drop_filter(pf); - /* Set the max frame size to 0x2600 by default, - * in case other drivers changed the default value. 
- */ - i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, false, 0, NULL); - - /* initialize mirror rule list */ - TAILQ_INIT(&pf->mirror_list); - /* initialize RSS rule list */ TAILQ_INIT(&pf->rss_config_list); @@ -1766,22 +1735,20 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) /* initialize queue region configuration */ i40e_init_queue_region_conf(dev); - /* initialize RSS configuration from rte_flow */ - memset(&pf->rss_info, 0, - sizeof(struct i40e_rte_flow_rss_conf)); - /* reset all stats of the device, including pf and main vsi */ i40e_dev_stats_reset(dev); return 0; err_init_fdir_filter_list: - rte_free(pf->tunnel.hash_table); + rte_hash_free(pf->tunnel.hash_table); rte_free(pf->tunnel.hash_map); err_init_tunnel_filter_list: - rte_free(pf->ethertype.hash_table); + rte_hash_free(pf->ethertype.hash_table); rte_free(pf->ethertype.hash_map); err_init_ethtype_filter_list: + rte_intr_callback_unregister(intr_handle, + i40e_dev_interrupt_handler, dev); rte_free(dev->data->mac_addrs); dev->data->mac_addrs = NULL; err_mac_alloc: @@ -1810,10 +1777,8 @@ i40e_rm_ethtype_filter_list(struct i40e_pf *pf) ethertype_rule = &pf->ethertype; /* Remove all ethertype filter rules and hash */ - if (ethertype_rule->hash_map) - rte_free(ethertype_rule->hash_map); - if (ethertype_rule->hash_table) - rte_hash_free(ethertype_rule->hash_table); + rte_free(ethertype_rule->hash_map); + rte_hash_free(ethertype_rule->hash_table); while ((p_ethertype = TAILQ_FIRST(&ethertype_rule->ethertype_list))) { TAILQ_REMOVE(&ethertype_rule->ethertype_list, @@ -1830,10 +1795,8 @@ i40e_rm_tunnel_filter_list(struct i40e_pf *pf) tunnel_rule = &pf->tunnel; /* Remove all tunnel director rules and hash */ - if (tunnel_rule->hash_map) - rte_free(tunnel_rule->hash_map); - if (tunnel_rule->hash_table) - rte_hash_free(tunnel_rule->hash_table); + rte_free(tunnel_rule->hash_map); + rte_hash_free(tunnel_rule->hash_table); while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) { TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules); @@ -1862,16 +1825,11 @@ i40e_fdir_memory_cleanup(struct i40e_pf *pf) fdir_info = &pf->fdir; /* flow director memory cleanup */ - if (fdir_info->hash_map) - rte_free(fdir_info->hash_map); - if (fdir_info->hash_table) - rte_hash_free(fdir_info->hash_table); - if (fdir_info->fdir_flow_pool.bitmap) - rte_free(fdir_info->fdir_flow_pool.bitmap); - if (fdir_info->fdir_flow_pool.pool) - rte_free(fdir_info->fdir_flow_pool.pool); - if (fdir_info->fdir_filter_array) - rte_free(fdir_info->fdir_filter_array); + rte_free(fdir_info->hash_map); + rte_hash_free(fdir_info->hash_table); + rte_free(fdir_info->fdir_flow_pool.bitmap); + rte_free(fdir_info->fdir_flow_pool.pool); + rte_free(fdir_info->fdir_filter_array); } void i40e_flex_payload_reg_set_default(struct i40e_hw *hw) @@ -1925,8 +1883,8 @@ i40e_dev_configure(struct rte_eth_dev *dev) ad->tx_simple_allowed = true; ad->tx_vec_allowed = true; - if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) - dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; + if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) + dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; /* Only legacy filter API needs the following fdir config. So when the * legacy filter API is deprecated, the following codes should also be @@ -1951,7 +1909,7 @@ i40e_dev_configure(struct rte_eth_dev *dev) goto err; /* VMDQ setup. 
- * General PMD driver call sequence are NIC init, configure, + * General PMD call sequence are NIC init, configure, * rx/tx_queue_setup and dev_start. In rx/tx_queue_setup() function, it * will try to lookup the VSI that specific queue belongs to if VMDQ * applicable. So, VMDQ setting has to be done before @@ -1960,13 +1918,13 @@ i40e_dev_configure(struct rte_eth_dev *dev) * number, which will be available after rx_queue_setup(). dev_start() * function is good to place RSS setup. */ - if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) { + if (mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) { ret = i40e_vmdq_setup(dev); if (ret) goto err; } - if (mq_mode & ETH_MQ_RX_DCB_FLAG) { + if (mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) { ret = i40e_dcb_setup(dev); if (ret) { PMD_DRV_LOG(ERR, "failed to configure DCB."); @@ -1999,9 +1957,9 @@ err: void i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi) { - struct rte_eth_dev *dev = vsi->adapter->eth_dev; + struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi); struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); - struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = pci_dev->intr_handle; struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); uint16_t msix_vect = vsi->msix_intr; uint16_t i; @@ -2115,12 +2073,13 @@ __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect, int i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx) { - struct rte_eth_dev *dev = vsi->adapter->eth_dev; + struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi); struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); - struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = pci_dev->intr_handle; struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); uint16_t msix_vect = vsi->msix_intr; - uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd); + uint16_t nb_msix = RTE_MIN(vsi->nb_msix, + rte_intr_nb_efd_get(intr_handle)); uint16_t queue_idx = 0; int record = 0; int i; @@ -2170,8 +2129,8 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx) vsi->nb_used_qps - i, itr_idx); for (; !!record && i < vsi->nb_used_qps; i++) - intr_handle->intr_vec[queue_idx + i] = - msix_vect; + rte_intr_vec_list_index_set(intr_handle, + queue_idx + i, msix_vect); break; } /* 1:1 queue/msix_vect mapping */ @@ -2179,7 +2138,9 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx) vsi->base_queue + i, 1, itr_idx); if (!!record) - intr_handle->intr_vec[queue_idx + i] = msix_vect; + if (rte_intr_vec_list_index_set(intr_handle, + queue_idx + i, msix_vect)) + return -rte_errno; msix_vect++; nb_msix--; @@ -2191,9 +2152,9 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx) void i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi) { - struct rte_eth_dev *dev = vsi->adapter->eth_dev; + struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi); struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); - struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = pci_dev->intr_handle; struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); struct i40e_pf *pf = I40E_VSI_TO_PF(vsi); uint16_t msix_intr, i; @@ -2218,9 +2179,9 @@ i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi) void i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi) { - struct rte_eth_dev *dev = vsi->adapter->eth_dev; + struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi); struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); - struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = 
pci_dev->intr_handle; struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); struct i40e_pf *pf = I40E_VSI_TO_PF(vsi); uint16_t msix_intr, i; @@ -2243,17 +2204,17 @@ i40e_parse_link_speeds(uint16_t link_speeds) { uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN; - if (link_speeds & ETH_LINK_SPEED_40G) + if (link_speeds & RTE_ETH_LINK_SPEED_40G) link_speed |= I40E_LINK_SPEED_40GB; - if (link_speeds & ETH_LINK_SPEED_25G) + if (link_speeds & RTE_ETH_LINK_SPEED_25G) link_speed |= I40E_LINK_SPEED_25GB; - if (link_speeds & ETH_LINK_SPEED_20G) + if (link_speeds & RTE_ETH_LINK_SPEED_20G) link_speed |= I40E_LINK_SPEED_20GB; - if (link_speeds & ETH_LINK_SPEED_10G) + if (link_speeds & RTE_ETH_LINK_SPEED_10G) link_speed |= I40E_LINK_SPEED_10GB; - if (link_speeds & ETH_LINK_SPEED_1G) + if (link_speeds & RTE_ETH_LINK_SPEED_1G) link_speed |= I40E_LINK_SPEED_1GB; - if (link_speeds & ETH_LINK_SPEED_100M) + if (link_speeds & RTE_ETH_LINK_SPEED_100M) link_speed |= I40E_LINK_SPEED_100MB; return link_speed; @@ -2331,7 +2292,8 @@ i40e_phy_conf_link(struct i40e_hw *hw, phy_conf.phy_type = is_up ? cpu_to_le32(phy_type_mask) : 0; phy_conf.phy_type_ext = is_up ? (I40E_AQ_PHY_TYPE_EXT_25G_KR | I40E_AQ_PHY_TYPE_EXT_25G_CR | I40E_AQ_PHY_TYPE_EXT_25G_SR | - I40E_AQ_PHY_TYPE_EXT_25G_LR) : 0; + I40E_AQ_PHY_TYPE_EXT_25G_LR | I40E_AQ_PHY_TYPE_EXT_25G_AOC | + I40E_AQ_PHY_TYPE_EXT_25G_ACC) : 0; phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info; phy_conf.eee_capability = phy_ab.eee_capability; phy_conf.eeer = phy_ab.eeer_val; @@ -2360,13 +2322,13 @@ i40e_apply_link_speed(struct rte_eth_dev *dev) abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK | I40E_AQ_PHY_LINK_ENABLED; - if (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) { - conf->link_speeds = ETH_LINK_SPEED_40G | - ETH_LINK_SPEED_25G | - ETH_LINK_SPEED_20G | - ETH_LINK_SPEED_10G | - ETH_LINK_SPEED_1G | - ETH_LINK_SPEED_100M; + if (conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) { + conf->link_speeds = RTE_ETH_LINK_SPEED_40G | + RTE_ETH_LINK_SPEED_25G | + RTE_ETH_LINK_SPEED_20G | + RTE_ETH_LINK_SPEED_10G | + RTE_ETH_LINK_SPEED_1G | + RTE_ETH_LINK_SPEED_100M; abilities |= I40E_AQ_PHY_AN_ENABLED; } else { @@ -2385,10 +2347,11 @@ i40e_dev_start(struct rte_eth_dev *dev) struct i40e_vsi *main_vsi = pf->main_vsi; int ret, i; struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); - struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = pci_dev->intr_handle; uint32_t intr_vector = 0; struct i40e_vsi *vsi; uint16_t nb_rxq, nb_txq; + uint16_t max_frame_size; hw->adapter_stopped = 0; @@ -2403,12 +2366,9 @@ i40e_dev_start(struct rte_eth_dev *dev) return ret; } - if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { - intr_handle->intr_vec = - rte_zmalloc("intr_vec", - dev->data->nb_rx_queues * sizeof(int), - 0); - if (!intr_handle->intr_vec) { + if (rte_intr_dp_is_en(intr_handle)) { + if (rte_intr_vec_list_alloc(intr_handle, "intr_vec", + dev->data->nb_rx_queues)) { PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues intr_vec", dev->data->nb_rx_queues); @@ -2511,7 +2471,7 @@ i40e_dev_start(struct rte_eth_dev *dev) if (ret != I40E_SUCCESS) PMD_DRV_LOG(WARNING, "Fail to set phy mask"); - /* Call get_link_info aq commond to enable/disable LSE */ + /* Call get_link_info aq command to enable/disable LSE */ i40e_dev_link_update(dev, 0); } @@ -2530,6 +2490,9 @@ i40e_dev_start(struct rte_eth_dev *dev) "please call hierarchy_commit() " "before starting the port"); + max_frame_size = dev->data->mtu + I40E_ETH_OVERHEAD; + i40e_set_mac_max_frame(dev, 
max_frame_size); + return I40E_SUCCESS; tx_err: @@ -2542,18 +2505,18 @@ rx_err: return ret; } -static void +static int i40e_dev_stop(struct rte_eth_dev *dev) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct i40e_vsi *main_vsi = pf->main_vsi; struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); - struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = pci_dev->intr_handle; int i; if (hw->adapter_stopped == 1) - return; + return 0; if (dev->data->dev_conf.intr_conf.rxq == 0) { rte_eal_alarm_cancel(i40e_dev_alarm_handler, dev); @@ -2590,17 +2553,19 @@ i40e_dev_stop(struct rte_eth_dev *dev) /* Clean datapath event and queue/vec mapping */ rte_intr_efd_disable(intr_handle); - if (intr_handle->intr_vec) { - rte_free(intr_handle->intr_vec); - intr_handle->intr_vec = NULL; - } + + /* Cleanup vector list */ + rte_intr_vec_list_free(intr_handle); /* reset hierarchy commit */ pf->tm_conf.committed = false; hw->adapter_stopped = 1; + dev->data->dev_started = 0; pf->adapter->rss_reta_updated = 0; + + return 0; } static int @@ -2609,8 +2574,7 @@ i40e_dev_close(struct rte_eth_dev *dev) struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); - struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; - struct i40e_mirror_rule *p_mirror; + struct rte_intr_handle *intr_handle = pci_dev->intr_handle; struct i40e_filter_control_settings settings; struct rte_flow *p_flow; uint32_t reg; @@ -2620,32 +2584,15 @@ i40e_dev_close(struct rte_eth_dev *dev) int retries = 0; PMD_INIT_FUNC_TRACE(); + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; ret = rte_eth_switch_domain_free(pf->switch_domain_id); if (ret) PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret); - i40e_dev_stop(dev); - - /* Remove all mirror rules */ - while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) { - ret = i40e_aq_del_mirror_rule(hw, - pf->main_vsi->veb->seid, - p_mirror->rule_type, - p_mirror->entries, - p_mirror->num_entries, - p_mirror->id); - if (ret < 0) - PMD_DRV_LOG(ERR, "failed to remove mirror rule: " - "status = %d, aq_err = %d.", ret, - hw->aq.asq_last_status); - - /* remove mirror software resource anyway */ - TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules); - rte_free(p_mirror); - pf->nb_mirror_rule--; - } + ret = i40e_dev_stop(dev); i40e_dev_free_queues(dev); @@ -2690,10 +2637,6 @@ i40e_dev_close(struct rte_eth_dev *dev) (reg | I40E_PFGEN_CTRL_PFSWR_MASK)); I40E_WRITE_FLUSH(hw); - dev->dev_ops = NULL; - dev->rx_pkt_burst = NULL; - dev->tx_pkt_burst = NULL; - /* Clear PXE mode */ i40e_clear_pxe_mode(hw); @@ -2742,8 +2685,10 @@ i40e_dev_close(struct rte_eth_dev *dev) /* Remove all Traffic Manager configuration */ i40e_tm_conf_uninit(dev); + i40e_clear_automask(pf); + hw->adapter_closed = 1; - return 0; + return ret; } /* @@ -2894,11 +2839,13 @@ i40e_dev_set_link_down(struct rte_eth_dev *dev) return i40e_phy_conf_link(hw, abilities, speed, false); } +#define CHECK_INTERVAL 100 /* 100ms */ +#define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */ + static __rte_always_inline void update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link) { /* Link status registers and values*/ -#define I40E_PRTMAC_LINKSTA 0x001E2420 #define I40E_REG_LINK_UP 0x40000080 #define I40E_PRTMAC_MACC 0x001E24E0 #define I40E_REG_MACC_25GB 0x00020000 
@@ -2911,7 +2858,7 @@ update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link) uint32_t link_speed; uint32_t reg_val; - reg_val = I40E_READ_REG(hw, I40E_PRTMAC_LINKSTA); + reg_val = I40E_READ_REG(hw, I40E_PRTMAC_LINKSTA(0)); link_speed = reg_val & I40E_REG_SPEED_MASK; reg_val &= I40E_REG_LINK_UP; link->link_status = (reg_val == I40E_REG_LINK_UP) ? 1 : 0; @@ -2922,34 +2869,34 @@ update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link) /* Parse the link status */ switch (link_speed) { case I40E_REG_SPEED_0: - link->link_speed = ETH_SPEED_NUM_100M; + link->link_speed = RTE_ETH_SPEED_NUM_100M; break; case I40E_REG_SPEED_1: - link->link_speed = ETH_SPEED_NUM_1G; + link->link_speed = RTE_ETH_SPEED_NUM_1G; break; case I40E_REG_SPEED_2: if (hw->mac.type == I40E_MAC_X722) - link->link_speed = ETH_SPEED_NUM_2_5G; + link->link_speed = RTE_ETH_SPEED_NUM_2_5G; else - link->link_speed = ETH_SPEED_NUM_10G; + link->link_speed = RTE_ETH_SPEED_NUM_10G; break; case I40E_REG_SPEED_3: if (hw->mac.type == I40E_MAC_X722) { - link->link_speed = ETH_SPEED_NUM_5G; + link->link_speed = RTE_ETH_SPEED_NUM_5G; } else { reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC); if (reg_val & I40E_REG_MACC_25GB) - link->link_speed = ETH_SPEED_NUM_25G; + link->link_speed = RTE_ETH_SPEED_NUM_25G; else - link->link_speed = ETH_SPEED_NUM_40G; + link->link_speed = RTE_ETH_SPEED_NUM_40G; } break; case I40E_REG_SPEED_4: if (hw->mac.type == I40E_MAC_X722) - link->link_speed = ETH_SPEED_NUM_10G; + link->link_speed = RTE_ETH_SPEED_NUM_10G; else - link->link_speed = ETH_SPEED_NUM_20G; + link->link_speed = RTE_ETH_SPEED_NUM_20G; break; default: PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed); @@ -2961,8 +2908,6 @@ static __rte_always_inline void update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link, bool enable_lse, int wait_to_complete) { -#define CHECK_INTERVAL 100 /* 100ms */ -#define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */ uint32_t rep_cnt = MAX_REPEAT_TIME; struct i40e_link_status link_status; int status; @@ -2976,8 +2921,8 @@ update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link, status = i40e_aq_get_link_info(hw, enable_lse, &link_status, NULL); if (unlikely(status != I40E_SUCCESS)) { - link->link_speed = ETH_SPEED_NUM_NONE; - link->link_duplex = ETH_LINK_FULL_DUPLEX; + link->link_speed = RTE_ETH_SPEED_NUM_NONE; + link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX; PMD_DRV_LOG(ERR, "Failed to get link info"); return; } @@ -2992,28 +2937,28 @@ update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link, /* Parse the link status */ switch (link_status.link_speed) { case I40E_LINK_SPEED_100MB: - link->link_speed = ETH_SPEED_NUM_100M; + link->link_speed = RTE_ETH_SPEED_NUM_100M; break; case I40E_LINK_SPEED_1GB: - link->link_speed = ETH_SPEED_NUM_1G; + link->link_speed = RTE_ETH_SPEED_NUM_1G; break; case I40E_LINK_SPEED_10GB: - link->link_speed = ETH_SPEED_NUM_10G; + link->link_speed = RTE_ETH_SPEED_NUM_10G; break; case I40E_LINK_SPEED_20GB: - link->link_speed = ETH_SPEED_NUM_20G; + link->link_speed = RTE_ETH_SPEED_NUM_20G; break; case I40E_LINK_SPEED_25GB: - link->link_speed = ETH_SPEED_NUM_25G; + link->link_speed = RTE_ETH_SPEED_NUM_25G; break; case I40E_LINK_SPEED_40GB: - link->link_speed = ETH_SPEED_NUM_40G; + link->link_speed = RTE_ETH_SPEED_NUM_40G; break; default: if (link->link_status) - link->link_speed = ETH_SPEED_NUM_UNKNOWN; + link->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN; else - link->link_speed = ETH_SPEED_NUM_NONE; + link->link_speed = RTE_ETH_SPEED_NUM_NONE; break; } } @@ -3030,9 
+2975,9 @@ i40e_dev_link_update(struct rte_eth_dev *dev, memset(&link, 0, sizeof(link)); /* i40e uses full duplex only */ - link.link_duplex = ETH_LINK_FULL_DUPLEX; + link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; link.link_autoneg = !(dev->data->dev_conf.link_speeds & - ETH_LINK_SPEED_FIXED); + RTE_ETH_LINK_SPEED_FIXED); if (!wait_to_complete && !enable_lse) update_link_reg(hw, &link); @@ -3248,6 +3193,10 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw) pf->offset_loaded, &os->eth.rx_unknown_protocol, &ns->eth.rx_unknown_protocol); + i40e_stat_update_48(hw, I40E_GL_RXERR1_H(hw->pf_id + I40E_MAX_VF), + I40E_GL_RXERR1_L(hw->pf_id + I40E_MAX_VF), + pf->offset_loaded, &pf->rx_err1_offset, + &pf->rx_err1); i40e_stat_update_48_in_64(hw, I40E_GLPRT_GOTCH(hw->port), I40E_GLPRT_GOTCL(hw->port), pf->offset_loaded, &os->eth.tx_bytes, @@ -3447,7 +3396,8 @@ i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) stats->ipackets = pf->main_vsi->eth_stats.rx_unicast + pf->main_vsi->eth_stats.rx_multicast + pf->main_vsi->eth_stats.rx_broadcast - - pf->main_vsi->eth_stats.rx_discards; + pf->main_vsi->eth_stats.rx_discards - + pf->rx_err1; stats->opackets = ns->eth.tx_unicast + ns->eth.tx_multicast + ns->eth.tx_broadcast; @@ -3461,7 +3411,8 @@ i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) pf->main_vsi->eth_stats.rx_discards; stats->ierrors = ns->crc_errors + ns->rx_length_errors + ns->rx_undersize + - ns->rx_oversize + ns->rx_fragments + ns->rx_jabber; + ns->rx_oversize + ns->rx_fragments + ns->rx_jabber + + pf->rx_err1; if (pf->vfs) { for (i = 0; i < pf->vf_num; i++) { @@ -3596,7 +3547,7 @@ static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, count++; } - /* Get individiual stats from i40e_hw_port struct */ + /* Get individual stats from i40e_hw_port struct */ for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) { strlcpy(xstats_names[count].name, rte_i40e_hw_port_strings[i].name, @@ -3654,7 +3605,7 @@ i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, count++; } - /* Get individiual stats from i40e_hw_port struct */ + /* Get individual stats from i40e_hw_port struct */ for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) { xstats[count].value = *(uint64_t *)(((char *)hw_stats) + rte_i40e_hw_port_strings[i].offset); @@ -3707,9 +3658,11 @@ i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) ((hw->nvm.version >> 4) & 0xff), (hw->nvm.version & 0xf), hw->nvm.eetrack, ver, build, patch); + if (ret < 0) + return -EINVAL; ret += 1; /* add the size of '\0' */ - if (fw_size < (u32)ret) + if (fw_size < (size_t)ret) return ret; else return 0; @@ -3758,38 +3711,39 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->min_mtu = RTE_ETHER_MIN_MTU; dev_info->rx_queue_offload_capa = 0; dev_info->rx_offload_capa = - DEV_RX_OFFLOAD_VLAN_STRIP | - DEV_RX_OFFLOAD_QINQ_STRIP | - DEV_RX_OFFLOAD_IPV4_CKSUM | - DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM | - DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | - DEV_RX_OFFLOAD_KEEP_CRC | - DEV_RX_OFFLOAD_SCATTER | - DEV_RX_OFFLOAD_VLAN_EXTEND | - DEV_RX_OFFLOAD_VLAN_FILTER | - DEV_RX_OFFLOAD_JUMBO_FRAME | - DEV_RX_OFFLOAD_RSS_HASH; - - dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE; + RTE_ETH_RX_OFFLOAD_VLAN_STRIP | + RTE_ETH_RX_OFFLOAD_QINQ_STRIP | + RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | + RTE_ETH_RX_OFFLOAD_UDP_CKSUM | + RTE_ETH_RX_OFFLOAD_TCP_CKSUM | + RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | + RTE_ETH_RX_OFFLOAD_KEEP_CRC | 
+ RTE_ETH_RX_OFFLOAD_SCATTER | + RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | + RTE_ETH_RX_OFFLOAD_VLAN_FILTER | + RTE_ETH_RX_OFFLOAD_RSS_HASH; + + dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE; dev_info->tx_offload_capa = - DEV_TX_OFFLOAD_VLAN_INSERT | - DEV_TX_OFFLOAD_QINQ_INSERT | - DEV_TX_OFFLOAD_IPV4_CKSUM | - DEV_TX_OFFLOAD_UDP_CKSUM | - DEV_TX_OFFLOAD_TCP_CKSUM | - DEV_TX_OFFLOAD_SCTP_CKSUM | - DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | - DEV_TX_OFFLOAD_TCP_TSO | - DEV_TX_OFFLOAD_VXLAN_TNL_TSO | - DEV_TX_OFFLOAD_GRE_TNL_TSO | - DEV_TX_OFFLOAD_IPIP_TNL_TSO | - DEV_TX_OFFLOAD_GENEVE_TNL_TSO | - DEV_TX_OFFLOAD_MULTI_SEGS | + RTE_ETH_TX_OFFLOAD_VLAN_INSERT | + RTE_ETH_TX_OFFLOAD_QINQ_INSERT | + RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | + RTE_ETH_TX_OFFLOAD_UDP_CKSUM | + RTE_ETH_TX_OFFLOAD_TCP_CKSUM | + RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | + RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | + RTE_ETH_TX_OFFLOAD_TCP_TSO | + RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | + RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | + RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO | + RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | + RTE_ETH_TX_OFFLOAD_MULTI_SEGS | + RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM | dev_info->tx_queue_offload_capa; dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP; + dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP; dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t); @@ -3844,7 +3798,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) { /* For XL710 */ - dev_info->speed_capa = ETH_LINK_SPEED_40G; + dev_info->speed_capa = RTE_ETH_LINK_SPEED_40G; dev_info->default_rxportconf.nb_queues = 2; dev_info->default_txportconf.nb_queues = 2; if (dev->data->nb_rx_queues == 1) @@ -3858,17 +3812,17 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) } else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) { /* For XXV710 */ - dev_info->speed_capa = ETH_LINK_SPEED_25G; + dev_info->speed_capa = RTE_ETH_LINK_SPEED_25G; dev_info->default_rxportconf.nb_queues = 1; dev_info->default_txportconf.nb_queues = 1; dev_info->default_rxportconf.ring_size = 256; dev_info->default_txportconf.ring_size = 256; } else { /* For X710 */ - dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G; + dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G; dev_info->default_rxportconf.nb_queues = 1; dev_info->default_txportconf.nb_queues = 1; - if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_10G) { + if (dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_10G) { dev_info->default_rxportconf.ring_size = 512; dev_info->default_txportconf.ring_size = 256; } else { @@ -3907,7 +3861,7 @@ i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev, int ret; if (qinq) { - if (vlan_type == ETH_VLAN_TYPE_OUTER) + if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) reg_id = 2; } @@ -3954,12 +3908,12 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev, struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); int qinq = dev->data->dev_conf.rxmode.offloads & - DEV_RX_OFFLOAD_VLAN_EXTEND; + RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; int ret = 0; - if ((vlan_type != ETH_VLAN_TYPE_INNER && - vlan_type != ETH_VLAN_TYPE_OUTER) || - (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) { + if ((vlan_type != RTE_ETH_VLAN_TYPE_INNER && + vlan_type != RTE_ETH_VLAN_TYPE_OUTER) || + (!qinq && vlan_type == RTE_ETH_VLAN_TYPE_INNER)) { PMD_DRV_LOG(ERR, "Unsupported 
vlan type."); return -EINVAL; @@ -3973,12 +3927,12 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev, /* 802.1ad frames ability is added in NVM API 1.7*/ if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) { if (qinq) { - if (vlan_type == ETH_VLAN_TYPE_OUTER) + if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) hw->first_tag = rte_cpu_to_le_16(tpid); - else if (vlan_type == ETH_VLAN_TYPE_INNER) + else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER) hw->second_tag = rte_cpu_to_le_16(tpid); } else { - if (vlan_type == ETH_VLAN_TYPE_OUTER) + if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) hw->second_tag = rte_cpu_to_le_16(tpid); } ret = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL); @@ -4037,37 +3991,37 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask) struct rte_eth_rxmode *rxmode; rxmode = &dev->data->dev_conf.rxmode; - if (mask & ETH_VLAN_FILTER_MASK) { - if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) + if (mask & RTE_ETH_VLAN_FILTER_MASK) { + if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) i40e_vsi_config_vlan_filter(vsi, TRUE); else i40e_vsi_config_vlan_filter(vsi, FALSE); } - if (mask & ETH_VLAN_STRIP_MASK) { + if (mask & RTE_ETH_VLAN_STRIP_MASK) { /* Enable or disable VLAN stripping */ - if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) + if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) i40e_vsi_config_vlan_stripping(vsi, TRUE); else i40e_vsi_config_vlan_stripping(vsi, FALSE); } - if (mask & ETH_VLAN_EXTEND_MASK) { - if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) { + if (mask & RTE_ETH_VLAN_EXTEND_MASK) { + if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) { i40e_vsi_config_double_vlan(vsi, TRUE); /* Set global registers with default ethertype. */ - i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER, + i40e_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_OUTER, RTE_ETHER_TYPE_VLAN); - i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER, + i40e_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_INNER, RTE_ETHER_TYPE_VLAN); } else i40e_vsi_config_double_vlan(vsi, FALSE); } - if (mask & ETH_QINQ_STRIP_MASK) { + if (mask & RTE_ETH_QINQ_STRIP_MASK) { /* Enable or disable outer VLAN stripping */ - if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP) + if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) i40e_vsi_config_outer_vlan_stripping(vsi, TRUE); else i40e_vsi_config_outer_vlan_stripping(vsi, FALSE); @@ -4150,17 +4104,17 @@ i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) /* Return current mode according to actual setting*/ switch (hw->fc.current_mode) { case I40E_FC_FULL: - fc_conf->mode = RTE_FC_FULL; + fc_conf->mode = RTE_ETH_FC_FULL; break; case I40E_FC_TX_PAUSE: - fc_conf->mode = RTE_FC_TX_PAUSE; + fc_conf->mode = RTE_ETH_FC_TX_PAUSE; break; case I40E_FC_RX_PAUSE: - fc_conf->mode = RTE_FC_RX_PAUSE; + fc_conf->mode = RTE_ETH_FC_RX_PAUSE; break; case I40E_FC_NONE: default: - fc_conf->mode = RTE_FC_NONE; + fc_conf->mode = RTE_ETH_FC_NONE; }; return 0; @@ -4176,10 +4130,10 @@ i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) struct i40e_hw *hw; struct i40e_pf *pf; enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = { - [RTE_FC_NONE] = I40E_FC_NONE, - [RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE, - [RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE, - [RTE_FC_FULL] = I40E_FC_FULL + [RTE_ETH_FC_NONE] = I40E_FC_NONE, + [RTE_ETH_FC_RX_PAUSE] = I40E_FC_RX_PAUSE, + [RTE_ETH_FC_TX_PAUSE] = I40E_FC_TX_PAUSE, + [RTE_ETH_FC_FULL] = I40E_FC_FULL }; /* high_water field in the rte_eth_fc_conf using the kilobytes unit */ @@ -4326,10 +4280,10 @@ i40e_macaddr_add(struct rte_eth_dev *dev, } 
rte_memcpy(&mac_filter.mac_addr, mac_addr, RTE_ETHER_ADDR_LEN); - if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) - mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; + if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) + mac_filter.filter_type = I40E_MACVLAN_PERFECT_MATCH; else - mac_filter.filter_type = RTE_MAC_PERFECT_MATCH; + mac_filter.filter_type = I40E_MAC_PERFECT_MATCH; if (pool == 0) vsi = pf->main_vsi; @@ -4384,119 +4338,6 @@ i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index) } } -/* Set perfect match or hash match of MAC and VLAN for a VF */ -static int -i40e_vf_mac_filter_set(struct i40e_pf *pf, - struct rte_eth_mac_filter *filter, - bool add) -{ - struct i40e_hw *hw; - struct i40e_mac_filter_info mac_filter; - struct rte_ether_addr old_mac; - struct rte_ether_addr *new_mac; - struct i40e_pf_vf *vf = NULL; - uint16_t vf_id; - int ret; - - if (pf == NULL) { - PMD_DRV_LOG(ERR, "Invalid PF argument."); - return -EINVAL; - } - hw = I40E_PF_TO_HW(pf); - - if (filter == NULL) { - PMD_DRV_LOG(ERR, "Invalid mac filter argument."); - return -EINVAL; - } - - new_mac = &filter->mac_addr; - - if (rte_is_zero_ether_addr(new_mac)) { - PMD_DRV_LOG(ERR, "Invalid ethernet address."); - return -EINVAL; - } - - vf_id = filter->dst_id; - - if (vf_id > pf->vf_num - 1 || !pf->vfs) { - PMD_DRV_LOG(ERR, "Invalid argument."); - return -EINVAL; - } - vf = &pf->vfs[vf_id]; - - if (add && rte_is_same_ether_addr(new_mac, &pf->dev_addr)) { - PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address."); - return -EINVAL; - } - - if (add) { - rte_memcpy(&old_mac, hw->mac.addr, RTE_ETHER_ADDR_LEN); - rte_memcpy(hw->mac.addr, new_mac->addr_bytes, - RTE_ETHER_ADDR_LEN); - rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr, - RTE_ETHER_ADDR_LEN); - - mac_filter.filter_type = filter->filter_type; - ret = i40e_vsi_add_mac(vf->vsi, &mac_filter); - if (ret != I40E_SUCCESS) { - PMD_DRV_LOG(ERR, "Failed to add MAC filter."); - return -1; - } - rte_ether_addr_copy(new_mac, &pf->dev_addr); - } else { - rte_memcpy(hw->mac.addr, hw->mac.perm_addr, - RTE_ETHER_ADDR_LEN); - ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr); - if (ret != I40E_SUCCESS) { - PMD_DRV_LOG(ERR, "Failed to delete MAC filter."); - return -1; - } - - /* Clear device address as it has been removed */ - if (rte_is_same_ether_addr(&pf->dev_addr, new_mac)) - memset(&pf->dev_addr, 0, sizeof(struct rte_ether_addr)); - } - - return 0; -} - -/* MAC filter handle */ -static int -i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op, - void *arg) -{ - struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); - struct rte_eth_mac_filter *filter; - struct i40e_hw *hw = I40E_PF_TO_HW(pf); - int ret = I40E_NOT_SUPPORTED; - - filter = (struct rte_eth_mac_filter *)(arg); - - switch (filter_op) { - case RTE_ETH_FILTER_NOP: - ret = I40E_SUCCESS; - break; - case RTE_ETH_FILTER_ADD: - i40e_pf_disable_irq0(hw); - if (filter->is_vf) - ret = i40e_vf_mac_filter_set(pf, filter, 1); - i40e_pf_enable_irq0(hw); - break; - case RTE_ETH_FILTER_DELETE: - i40e_pf_disable_irq0(hw); - if (filter->is_vf) - ret = i40e_vf_mac_filter_set(pf, filter, 0); - i40e_pf_enable_irq0(hw); - break; - default: - PMD_DRV_LOG(ERR, "unknown operation %u", filter_op); - ret = I40E_ERR_PARAM; - break; - } - - return ret; -} - static int i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size) { @@ -4540,7 +4381,6 @@ i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size) { struct i40e_pf *pf; struct i40e_hw *hw; - int 
ret; if (!vsi || !lut) return -EINVAL; @@ -4549,12 +4389,16 @@ i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size) hw = I40E_VSI_TO_HW(vsi); if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) { - ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, - vsi->type != I40E_VSI_SRIOV, - lut, lut_size); - if (ret) { - PMD_DRV_LOG(ERR, "Failed to set RSS lookup table"); - return ret; + enum i40e_status_code status; + + status = i40e_aq_set_rss_lut(hw, vsi->vsi_id, + vsi->type != I40E_VSI_SRIOV, + lut, lut_size); + if (status) { + PMD_DRV_LOG(ERR, + "Failed to update RSS lookup table, error status: %d", + status); + return -EIO; } } else { uint32_t *lut_dw = (uint32_t *)lut; @@ -4589,7 +4433,7 @@ i40e_dev_rss_reta_update(struct rte_eth_dev *dev, int ret; if (reta_size != lut_size || - reta_size > ETH_RSS_RETA_SIZE_512) { + reta_size > RTE_ETH_RSS_RETA_SIZE_512) { PMD_DRV_LOG(ERR, "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)", reta_size, lut_size); @@ -4605,8 +4449,8 @@ i40e_dev_rss_reta_update(struct rte_eth_dev *dev, if (ret) goto out; for (i = 0; i < reta_size; i++) { - idx = i / RTE_RETA_GROUP_SIZE; - shift = i % RTE_RETA_GROUP_SIZE; + idx = i / RTE_ETH_RETA_GROUP_SIZE; + shift = i % RTE_ETH_RETA_GROUP_SIZE; if (reta_conf[idx].mask & (1ULL << shift)) lut[i] = reta_conf[idx].reta[shift]; } @@ -4632,7 +4476,7 @@ i40e_dev_rss_reta_query(struct rte_eth_dev *dev, int ret; if (reta_size != lut_size || - reta_size > ETH_RSS_RETA_SIZE_512) { + reta_size > RTE_ETH_RSS_RETA_SIZE_512) { PMD_DRV_LOG(ERR, "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)", reta_size, lut_size); @@ -4649,8 +4493,8 @@ i40e_dev_rss_reta_query(struct rte_eth_dev *dev, if (ret) goto out; for (i = 0; i < reta_size; i++) { - idx = i / RTE_RETA_GROUP_SIZE; - shift = i % RTE_RETA_GROUP_SIZE; + idx = i / RTE_ETH_RETA_GROUP_SIZE; + shift = i % RTE_ETH_RETA_GROUP_SIZE; if (reta_conf[idx].mask & (1ULL << shift)) reta_conf[idx].reta[shift] = lut[i]; } @@ -4674,13 +4518,15 @@ i40e_allocate_dma_mem_d(__rte_unused struct i40e_hw *hw, u64 size, u32 alignment) { + static uint64_t i40e_dma_memzone_id; const struct rte_memzone *mz = NULL; char z_name[RTE_MEMZONE_NAMESIZE]; if (!mem) return I40E_ERR_PARAM; - snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand()); + snprintf(z_name, sizeof(z_name), "i40e_dma_%" PRIu64, + __atomic_fetch_add(&i40e_dma_memzone_id, 1, __ATOMIC_RELAXED)); mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M); if (!mz) @@ -4965,7 +4811,7 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev) pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi, hw->func_caps.num_vsis - vsi_count); pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi, - ETH_64_POOLS); + RTE_ETH_64_POOLS); if (pf->max_nb_vmdq_vsi) { pf->flags |= I40E_FLAG_VMDQ; pf->vmdq_nb_qps = pf->vmdq_nb_qp_max; @@ -5554,7 +5400,7 @@ i40e_vsi_release(struct i40e_vsi *vsi) /* VSI has child to attach, release child first */ if (vsi->veb) { - TAILQ_FOREACH_SAFE(vsi_list, &vsi->veb->head, list, temp) { + RTE_TAILQ_FOREACH_SAFE(vsi_list, &vsi->veb->head, list, temp) { if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS) return -1; } @@ -5562,7 +5408,8 @@ i40e_vsi_release(struct i40e_vsi *vsi) } if (vsi->floating_veb) { - TAILQ_FOREACH_SAFE(vsi_list, &vsi->floating_veb->head, list, temp) { + RTE_TAILQ_FOREACH_SAFE(vsi_list, &vsi->floating_veb->head, + list, temp) { if (i40e_vsi_release(vsi_list->vsi) != 
I40E_SUCCESS) return -1; } @@ -5570,7 +5417,7 @@ i40e_vsi_release(struct i40e_vsi *vsi) /* Remove all macvlan filters of the VSI */ i40e_vsi_remove_all_macvlan_filter(vsi); - TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) + RTE_TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) rte_free(f); if (vsi->type != I40E_VSI_MAIN && @@ -5648,7 +5495,7 @@ i40e_update_default_filter_setting(struct i40e_vsi *vsi) mac = &f->mac_info.mac_addr; rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr, ETH_ADDR_LEN); - f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH; + f->mac_info.filter_type = I40E_MACVLAN_PERFECT_MATCH; TAILQ_INSERT_TAIL(&vsi->mac_list, f, next); vsi->mac_num++; @@ -5656,7 +5503,7 @@ i40e_update_default_filter_setting(struct i40e_vsi *vsi) } rte_memcpy(&filter.mac_addr, (struct rte_ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN); - filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; + filter.filter_type = I40E_MACVLAN_PERFECT_MATCH; return i40e_vsi_add_mac(vsi, &filter); } @@ -5689,7 +5536,7 @@ i40e_vsi_get_bw_config(struct i40e_vsi *vsi) &ets_sla_config, NULL); if (ret != I40E_SUCCESS) { PMD_DRV_LOG(ERR, - "VSI failed to get TC bandwdith configuration %u", + "VSI failed to get TC bandwidth configuration %u", hw->aq.asq_last_status); return ret; } @@ -6122,7 +5969,7 @@ i40e_vsi_setup(struct i40e_pf *pf, /* MAC/VLAN configuration */ rte_memcpy(&filter.mac_addr, &broadcast, RTE_ETHER_ADDR_LEN); - filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; + filter.filter_type = I40E_MACVLAN_PERFECT_MATCH; ret = i40e_vsi_add_mac(vsi, &filter); if (ret != I40E_SUCCESS) { @@ -6150,15 +5997,15 @@ i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on) struct i40e_mac_filter *f; void *temp; struct i40e_mac_filter_info *mac_filter; - enum rte_mac_filter_type desired_filter; + enum i40e_mac_filter_type desired_filter; int ret = I40E_SUCCESS; if (on) { /* Filter to match MAC and VLAN */ - desired_filter = RTE_MACVLAN_PERFECT_MATCH; + desired_filter = I40E_MACVLAN_PERFECT_MATCH; } else { /* Filter to match only MAC */ - desired_filter = RTE_MAC_PERFECT_MATCH; + desired_filter = I40E_MAC_PERFECT_MATCH; } num = vsi->mac_num; @@ -6173,7 +6020,7 @@ i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on) i = 0; /* Remove all existing mac */ - TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) { + RTE_TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) { mac_filter[i] = f->mac_info; ret = i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr); if (ret) { @@ -6250,10 +6097,10 @@ i40e_dev_init_vlan(struct rte_eth_dev *dev) int mask = 0; /* Apply vlan offload setting */ - mask = ETH_VLAN_STRIP_MASK | - ETH_QINQ_STRIP_MASK | - ETH_VLAN_FILTER_MASK | - ETH_VLAN_EXTEND_MASK; + mask = RTE_ETH_VLAN_STRIP_MASK | + RTE_ETH_QINQ_STRIP_MASK | + RTE_ETH_VLAN_FILTER_MASK | + RTE_ETH_VLAN_EXTEND_MASK; ret = i40e_vlan_offload_set(dev, mask); if (ret) { PMD_DRV_LOG(INFO, "Failed to update vlan offload"); @@ -6348,6 +6195,8 @@ i40e_pf_setup(struct i40e_pf *pf) memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats)); memset(&pf->internal_stats, 0, sizeof(struct i40e_eth_stats)); memset(&pf->internal_stats_offset, 0, sizeof(struct i40e_eth_stats)); + pf->rx_err1 = 0; + pf->rx_err1_offset = 0; ret = i40e_pf_get_switch_config(pf); if (ret != I40E_SUCCESS) { @@ -6380,9 +6229,9 @@ i40e_pf_setup(struct i40e_pf *pf) /* Configure filter control */ memset(&settings, 0, sizeof(settings)); - if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128) + if (hw->func_caps.rss_table_size == RTE_ETH_RSS_RETA_SIZE_128) settings.hash_lut_size = 
I40E_HASH_LUT_SIZE_128; - else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512) + else if (hw->func_caps.rss_table_size == RTE_ETH_RSS_RETA_SIZE_512) settings.hash_lut_size = I40E_HASH_LUT_SIZE_512; else { PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported", @@ -6537,8 +6386,7 @@ i40e_dev_tx_init(struct i40e_pf *pf) break; } if (ret == I40E_SUCCESS) - i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf) - ->eth_dev); + i40e_set_tx_function(&rte_eth_devices[pf->dev_data->port_id]); return ret; } @@ -6566,8 +6414,7 @@ i40e_dev_rx_init(struct i40e_pf *pf) } } if (ret == I40E_SUCCESS) - i40e_set_rx_function(container_of(pf, struct i40e_adapter, pf) - ->eth_dev); + i40e_set_rx_function(&rte_eth_devices[pf->dev_data->port_id]); return ret; } @@ -6717,9 +6564,13 @@ i40e_stat_update_48(struct i40e_hw *hw, { uint64_t new_data; - new_data = (uint64_t)I40E_READ_REG(hw, loreg); - new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) & - I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH; + if (hw->device_id == I40E_DEV_ID_QEMU) { + new_data = (uint64_t)I40E_READ_REG(hw, loreg); + new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) & + I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH; + } else { + new_data = I40E_READ_REG64(hw, loreg); + } if (!offset_loaded) *offset = new_data; @@ -6860,6 +6711,7 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev) if (!ret) rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); + break; default: PMD_DRV_LOG(DEBUG, "Request %u is not supported yet", @@ -6963,7 +6815,7 @@ i40e_handle_mdd_event(struct rte_eth_dev *dev) * @param handle * Pointer to interrupt handle. * @param param - * The address of parameter (struct rte_eth_dev *) regsitered before. + * The address of parameter (struct rte_eth_dev *) registered before. * * @return * void @@ -7101,18 +6953,18 @@ i40e_add_macvlan_filters(struct i40e_vsi *vsi, rte_cpu_to_le_16(filter[num + i].vlan_id); switch (filter[num + i].filter_type) { - case RTE_MAC_PERFECT_MATCH: + case I40E_MAC_PERFECT_MATCH: flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH | I40E_AQC_MACVLAN_ADD_IGNORE_VLAN; break; - case RTE_MACVLAN_PERFECT_MATCH: + case I40E_MACVLAN_PERFECT_MATCH: flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH; break; - case RTE_MAC_HASH_MATCH: + case I40E_MAC_HASH_MATCH: flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH | I40E_AQC_MACVLAN_ADD_IGNORE_VLAN; break; - case RTE_MACVLAN_HASH_MATCH: + case I40E_MACVLAN_HASH_MATCH: flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH; break; default: @@ -7151,6 +7003,7 @@ i40e_remove_macvlan_filters(struct i40e_vsi *vsi, int ret = I40E_SUCCESS; struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); struct i40e_aqc_remove_macvlan_element_data *req_list; + enum i40e_admin_queue_err aq_status; if (filter == NULL || total == 0) return I40E_ERR_PARAM; @@ -7176,18 +7029,18 @@ i40e_remove_macvlan_filters(struct i40e_vsi *vsi, rte_cpu_to_le_16(filter[num + i].vlan_id); switch (filter[num + i].filter_type) { - case RTE_MAC_PERFECT_MATCH: + case I40E_MAC_PERFECT_MATCH: flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH | I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; break; - case RTE_MACVLAN_PERFECT_MATCH: + case I40E_MACVLAN_PERFECT_MATCH: flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; break; - case RTE_MAC_HASH_MATCH: + case I40E_MAC_HASH_MATCH: flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH | I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; break; - case RTE_MACVLAN_HASH_MATCH: + case I40E_MACVLAN_HASH_MATCH: flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH; break; default: @@ -7198,11 +7051,17 @@ i40e_remove_macvlan_filters(struct i40e_vsi *vsi, req_list[i].flags = 
rte_cpu_to_le_16(flags); } - ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list, - actual_num, NULL); + ret = i40e_aq_remove_macvlan_v2(hw, vsi->seid, req_list, + actual_num, NULL, &aq_status); + if (ret != I40E_SUCCESS) { - PMD_DRV_LOG(ERR, "Failed to remove macvlan filter"); - goto DONE; + /* Do not report as an error when firmware returns ENOENT */ + if (aq_status == I40E_AQ_RC_ENOENT) { + ret = I40E_SUCCESS; + } else { + PMD_DRV_LOG(ERR, "Failed to remove macvlan filter"); + goto DONE; + } } num += actual_num; } while (num < total); @@ -7233,7 +7092,7 @@ i40e_find_vlan_filter(struct i40e_vsi *vsi, { uint32_t vid_idx, vid_bit; - if (vlan_id > ETH_VLAN_ID_MAX) + if (vlan_id > RTE_ETH_VLAN_ID_MAX) return 0; vid_idx = I40E_VFTA_IDX(vlan_id); @@ -7268,7 +7127,7 @@ i40e_set_vlan_filter(struct i40e_vsi *vsi, struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0}; int ret; - if (vlan_id > ETH_VLAN_ID_MAX) + if (vlan_id > RTE_ETH_VLAN_ID_MAX) return; i40e_store_vlan_filter(vsi, vlan_id, on); @@ -7532,8 +7391,8 @@ i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter) f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr); if (f != NULL) return I40E_SUCCESS; - if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) || - (mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) { + if (mac_filter->filter_type == I40E_MACVLAN_PERFECT_MATCH || + mac_filter->filter_type == I40E_MACVLAN_HASH_MATCH) { /** * If vlan_num is 0, that's the first time to add mac, @@ -7544,8 +7403,8 @@ i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter) vsi->vlan_num = 1; } vlan_num = vsi->vlan_num; - } else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) || - (mac_filter->filter_type == RTE_MAC_HASH_MATCH)) + } else if (mac_filter->filter_type == I40E_MAC_PERFECT_MATCH || + mac_filter->filter_type == I40E_MAC_HASH_MATCH) vlan_num = 1; mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0); @@ -7560,8 +7419,8 @@ i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter) ETH_ADDR_LEN); } - if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH || - mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) { + if (mac_filter->filter_type == I40E_MACVLAN_PERFECT_MATCH || + mac_filter->filter_type == I40E_MACVLAN_HASH_MATCH) { ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, &mac_filter->mac_addr); if (ret != I40E_SUCCESS) @@ -7598,7 +7457,7 @@ i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct rte_ether_addr *addr) struct i40e_mac_filter *f; struct i40e_macvlan_filter *mv_f; int i, vlan_num; - enum rte_mac_filter_type filter_type; + enum i40e_mac_filter_type filter_type; int ret = I40E_SUCCESS; /* Can't find it, return an error */ @@ -7608,14 +7467,14 @@ i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct rte_ether_addr *addr) vlan_num = vsi->vlan_num; filter_type = f->mac_info.filter_type; - if (filter_type == RTE_MACVLAN_PERFECT_MATCH || - filter_type == RTE_MACVLAN_HASH_MATCH) { + if (filter_type == I40E_MACVLAN_PERFECT_MATCH || + filter_type == I40E_MACVLAN_HASH_MATCH) { if (vlan_num == 0) { PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0"); return I40E_ERR_PARAM; } - } else if (filter_type == RTE_MAC_PERFECT_MATCH || - filter_type == RTE_MAC_HASH_MATCH) + } else if (filter_type == I40E_MAC_PERFECT_MATCH || + filter_type == I40E_MAC_HASH_MATCH) vlan_num = 1; mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0); @@ -7629,8 +7488,8 @@ i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct rte_ether_addr *addr) 
rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr, ETH_ADDR_LEN); } - if (filter_type == RTE_MACVLAN_PERFECT_MATCH || - filter_type == RTE_MACVLAN_HASH_MATCH) { + if (filter_type == I40E_MACVLAN_PERFECT_MATCH || + filter_type == I40E_MACVLAN_HASH_MATCH) { ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr); if (ret != I40E_SUCCESS) goto DONE; @@ -7687,7 +7546,7 @@ i40e_parse_hena(const struct i40e_adapter *adapter, uint64_t flags) } /* Disable RSS */ -static void +void i40e_pf_disable_rss(struct i40e_pf *pf) { struct i40e_hw *hw = I40E_PF_TO_HW(pf); @@ -7705,7 +7564,6 @@ i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len) uint16_t key_idx = (vsi->type == I40E_VSI_SRIOV) ? I40E_VFQF_HKEY_MAX_INDEX : I40E_PFQF_HKEY_MAX_INDEX; - int ret = 0; if (!key || key_len == 0) { PMD_DRV_LOG(DEBUG, "No key to be configured"); @@ -7718,11 +7576,16 @@ i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len) if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) { struct i40e_aqc_get_set_rss_key_data *key_dw = - (struct i40e_aqc_get_set_rss_key_data *)key; + (struct i40e_aqc_get_set_rss_key_data *)key; + enum i40e_status_code status = + i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw); - ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw); - if (ret) - PMD_INIT_LOG(ERR, "Failed to configure RSS key via AQ"); + if (status) { + PMD_DRV_LOG(ERR, + "Failed to configure RSS key via AQ, error status: %d", + status); + return -EIO; + } } else { uint32_t *hash_key = (uint32_t *)key; uint16_t i; @@ -7742,7 +7605,7 @@ i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len) I40E_WRITE_FLUSH(hw); } - return ret; + return 0; } static int @@ -7858,25 +7721,25 @@ static int i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag) { switch (filter_type) { - case RTE_TUNNEL_FILTER_IMAC_IVLAN: + case RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN: *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN; break; - case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID: + case RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN_TENID: *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID; break; - case RTE_TUNNEL_FILTER_IMAC_TENID: + case RTE_ETH_TUNNEL_FILTER_IMAC_TENID: *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID; break; - case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC: + case RTE_ETH_TUNNEL_FILTER_OMAC_TENID_IMAC: *flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC; break; - case ETH_TUNNEL_FILTER_IMAC: + case RTE_ETH_TUNNEL_FILTER_IMAC: *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC; break; - case ETH_TUNNEL_FILTER_OIP: + case RTE_ETH_TUNNEL_FILTER_OIP: *flag = I40E_AQC_ADD_CLOUD_FILTER_OIP; break; - case ETH_TUNNEL_FILTER_IIP: + case RTE_ETH_TUNNEL_FILTER_IIP: *flag = I40E_AQC_ADD_CLOUD_FILTER_IIP; break; default: @@ -7977,178 +7840,39 @@ i40e_sw_tunnel_filter_del(struct i40e_pf *pf, return 0; } -int -i40e_dev_tunnel_filter_set(struct i40e_pf *pf, - struct rte_eth_tunnel_filter_conf *tunnel_filter, - uint8_t add) +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0 0x48 +#define I40E_TR_VXLAN_GRE_KEY_MASK 0x4 +#define I40E_TR_GENEVE_KEY_MASK 0x8 +#define I40E_TR_GENERIC_UDP_TUNNEL_MASK 0x40 +#define I40E_TR_GRE_KEY_MASK 0x400 +#define I40E_TR_GRE_KEY_WITH_XSUM_MASK 0x800 +#define I40E_TR_GRE_NO_KEY_MASK 0x8000 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_PORT_TR_WORD0 0x49 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_DIRECTION_WORD0 0x41 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_INGRESS_WORD0 0x80 +#define I40E_DIRECTION_INGRESS_KEY 0x8000 +#define I40E_TR_L4_TYPE_TCP 0x2 +#define I40E_TR_L4_TYPE_UDP 0x4 +#define I40E_TR_L4_TYPE_SCTP 0x8 + +static enum 
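
i40e_set_rss_key() above now propagates an admin-queue failure as -EIO; when the AQ capability is absent it falls back to writing the 52-byte key as thirteen 32-bit register words. A minimal sketch of that fallback, where wr32() is a hypothetical accessor standing in for the driver's PFQF_HKEY register write:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define HKEY_WORDS 13        /* I40E_PFQF_HKEY_MAX_INDEX + 1 -> 52 key bytes */

static void
rss_key_write(void (*wr32)(unsigned int idx, uint32_t val),
              const uint8_t *key, size_t key_len)
{
        uint32_t words[HKEY_WORDS] = {0};
        unsigned int i;

        /* stage through an aligned copy; the caller's key may be unaligned */
        memcpy(words, key, key_len < sizeof(words) ? key_len : sizeof(words));
        for (i = 0; i < HKEY_WORDS; i++)
                wr32(i, words[i]);                 /* i -> PFQF_HKEY(i) */
}
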
+i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf) { - uint16_t ip_type; - uint32_t ipv4_addr, ipv4_addr_le; - uint8_t i, tun_type = 0; - /* internal varialbe to convert ipv6 byte order */ - uint32_t convert_ipv6[4]; - int val, ret = 0; + struct i40e_aqc_replace_cloud_filters_cmd filter_replace; + struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf; struct i40e_hw *hw = I40E_PF_TO_HW(pf); - struct i40e_vsi *vsi = pf->main_vsi; - struct i40e_aqc_cloud_filters_element_bb *cld_filter; - struct i40e_aqc_cloud_filters_element_bb *pfilter; - struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel; - struct i40e_tunnel_filter *tunnel, *node; - struct i40e_tunnel_filter check_filter; /* Check if filter exists */ - - cld_filter = rte_zmalloc("tunnel_filter", - sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext), - 0); + struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id]; + enum i40e_status_code status = I40E_SUCCESS; - if (NULL == cld_filter) { - PMD_DRV_LOG(ERR, "Failed to alloc memory."); - return -ENOMEM; + if (pf->support_multi_driver) { + PMD_DRV_LOG(ERR, "Replace l1 filter is not supported."); + return I40E_NOT_SUPPORTED; } - pfilter = cld_filter; - - rte_ether_addr_copy(&tunnel_filter->outer_mac, - (struct rte_ether_addr *)&pfilter->element.outer_mac); - rte_ether_addr_copy(&tunnel_filter->inner_mac, - (struct rte_ether_addr *)&pfilter->element.inner_mac); - pfilter->element.inner_vlan = - rte_cpu_to_le_16(tunnel_filter->inner_vlan); - if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) { - ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4; - ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr); - ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr); - rte_memcpy(&pfilter->element.ipaddr.v4.data, - &ipv4_addr_le, - sizeof(pfilter->element.ipaddr.v4.data)); - } else { - ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6; - for (i = 0; i < 4; i++) { - convert_ipv6[i] = - rte_cpu_to_le_32(rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv6_addr[i])); - } - rte_memcpy(&pfilter->element.ipaddr.v6.data, - &convert_ipv6, - sizeof(pfilter->element.ipaddr.v6.data)); - } - - /* check tunneled type */ - switch (tunnel_filter->tunnel_type) { - case RTE_TUNNEL_TYPE_VXLAN: - tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN; - break; - case RTE_TUNNEL_TYPE_NVGRE: - tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC; - break; - case RTE_TUNNEL_TYPE_IP_IN_GRE: - tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP; - break; - case RTE_TUNNEL_TYPE_VXLAN_GPE: - tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN_GPE; - break; - default: - /* Other tunnel types is not supported. 
*/ - PMD_DRV_LOG(ERR, "tunnel type is not supported."); - rte_free(cld_filter); - return -EINVAL; - } - - val = i40e_dev_get_filter_type(tunnel_filter->filter_type, - &pfilter->element.flags); - if (val < 0) { - rte_free(cld_filter); - return -EINVAL; - } - - pfilter->element.flags |= rte_cpu_to_le_16( - I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE | - ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT)); - pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id); - pfilter->element.queue_number = - rte_cpu_to_le_16(tunnel_filter->queue_id); - - /* Check if there is the filter in SW list */ - memset(&check_filter, 0, sizeof(check_filter)); - i40e_tunnel_filter_convert(cld_filter, &check_filter); - node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input); - if (add && node) { - PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!"); - rte_free(cld_filter); - return -EINVAL; - } - - if (!add && !node) { - PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!"); - rte_free(cld_filter); - return -EINVAL; - } - - if (add) { - ret = i40e_aq_add_cloud_filters(hw, - vsi->seid, &cld_filter->element, 1); - if (ret < 0) { - PMD_DRV_LOG(ERR, "Failed to add a tunnel filter."); - rte_free(cld_filter); - return -ENOTSUP; - } - tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0); - if (tunnel == NULL) { - PMD_DRV_LOG(ERR, "Failed to alloc memory."); - rte_free(cld_filter); - return -ENOMEM; - } - - rte_memcpy(tunnel, &check_filter, sizeof(check_filter)); - ret = i40e_sw_tunnel_filter_insert(pf, tunnel); - if (ret < 0) - rte_free(tunnel); - } else { - ret = i40e_aq_rem_cloud_filters(hw, vsi->seid, - &cld_filter->element, 1); - if (ret < 0) { - PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter."); - rte_free(cld_filter); - return -ENOTSUP; - } - ret = i40e_sw_tunnel_filter_del(pf, &node->input); - } - - rte_free(cld_filter); - return ret; -} - -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0 0x48 -#define I40E_TR_VXLAN_GRE_KEY_MASK 0x4 -#define I40E_TR_GENEVE_KEY_MASK 0x8 -#define I40E_TR_GENERIC_UDP_TUNNEL_MASK 0x40 -#define I40E_TR_GRE_KEY_MASK 0x400 -#define I40E_TR_GRE_KEY_WITH_XSUM_MASK 0x800 -#define I40E_TR_GRE_NO_KEY_MASK 0x8000 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_PORT_TR_WORD0 0x49 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_DIRECTION_WORD0 0x41 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_INGRESS_WORD0 0x80 -#define I40E_DIRECTION_INGRESS_KEY 0x8000 -#define I40E_TR_L4_TYPE_TCP 0x2 -#define I40E_TR_L4_TYPE_UDP 0x4 -#define I40E_TR_L4_TYPE_SCTP 0x8 - -static enum -i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf) -{ - struct i40e_aqc_replace_cloud_filters_cmd filter_replace; - struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf; - struct i40e_hw *hw = I40E_PF_TO_HW(pf); - struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev; - enum i40e_status_code status = I40E_SUCCESS; - - if (pf->support_multi_driver) { - PMD_DRV_LOG(ERR, "Replace l1 filter is not supported."); - return I40E_NOT_SUPPORTED; - } - - memset(&filter_replace, 0, - sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); - memset(&filter_replace_buf, 0, - sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf)); + memset(&filter_replace, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); + memset(&filter_replace_buf, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf)); /* create L1 filter */ filter_replace.old_filter_type = @@ -8198,7 +7922,7 @@ i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf) struct 
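
The deleted i40e_dev_tunnel_filter_set() above converts tunnel IP addresses from network byte order into the little-endian layout the admin-queue cloud-filter element expects (rte_be_to_cpu_32 followed by rte_cpu_to_le_32). A standalone equivalent for one IPv4 address, assuming only libc:

#include <stdint.h>
#include <arpa/inet.h>    /* ntohl() */

static uint32_t
ipv4_be_to_aq_le(uint32_t addr_be)
{
        uint32_t host = ntohl(addr_be);    /* big endian -> host order */

#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        return __builtin_bswap32(host);    /* host -> little endian */
#else
        return host;                       /* already little endian */
#endif
}
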
i40e_aqc_replace_cloud_filters_cmd filter_replace; struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf; struct i40e_hw *hw = I40E_PF_TO_HW(pf); - struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev; + struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id]; enum i40e_status_code status = I40E_SUCCESS; if (pf->support_multi_driver) { @@ -8273,7 +7997,7 @@ i40e_replace_gtp_l1_filter(struct i40e_pf *pf) struct i40e_aqc_replace_cloud_filters_cmd filter_replace; struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf; struct i40e_hw *hw = I40E_PF_TO_HW(pf); - struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev; + struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id]; enum i40e_status_code status = I40E_SUCCESS; if (pf->support_multi_driver) { @@ -8361,7 +8085,7 @@ i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf) struct i40e_aqc_replace_cloud_filters_cmd filter_replace; struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf; struct i40e_hw *hw = I40E_PF_TO_HW(pf); - struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev; + struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id]; enum i40e_status_code status = I40E_SUCCESS; if (pf->support_multi_driver) { @@ -8436,7 +8160,7 @@ i40e_replace_port_l1_filter(struct i40e_pf *pf, struct i40e_aqc_replace_cloud_filters_cmd filter_replace; enum i40e_status_code status = I40E_SUCCESS; struct i40e_hw *hw = I40E_PF_TO_HW(pf); - struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev; + struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id]; if (pf->support_multi_driver) { PMD_DRV_LOG(ERR, "Replace l1 filter is not supported."); @@ -8508,7 +8232,7 @@ i40e_replace_port_cloud_filter(struct i40e_pf *pf, struct i40e_aqc_replace_cloud_filters_cmd filter_replace; enum i40e_status_code status = I40E_SUCCESS; struct i40e_hw *hw = I40E_PF_TO_HW(pf); - struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev; + struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id]; if (pf->support_multi_driver) { PMD_DRV_LOG(ERR, "Replace cloud filter is not supported."); @@ -8981,16 +8705,16 @@ i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, return -EINVAL; switch (udp_tunnel->prot_type) { - case RTE_TUNNEL_TYPE_VXLAN: + case RTE_ETH_TUNNEL_TYPE_VXLAN: ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port, I40E_AQC_TUNNEL_TYPE_VXLAN); break; - case RTE_TUNNEL_TYPE_VXLAN_GPE: + case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE: ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port, I40E_AQC_TUNNEL_TYPE_VXLAN_GPE); break; - case RTE_TUNNEL_TYPE_GENEVE: - case RTE_TUNNEL_TYPE_TEREDO: + case RTE_ETH_TUNNEL_TYPE_GENEVE: + case RTE_ETH_TUNNEL_TYPE_TEREDO: PMD_DRV_LOG(ERR, "Tunnel type is not supported now."); ret = -1; break; @@ -9016,12 +8740,12 @@ i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, return -EINVAL; switch (udp_tunnel->prot_type) { - case RTE_TUNNEL_TYPE_VXLAN: - case RTE_TUNNEL_TYPE_VXLAN_GPE: + case RTE_ETH_TUNNEL_TYPE_VXLAN: + case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE: ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port); break; - case RTE_TUNNEL_TYPE_GENEVE: - case RTE_TUNNEL_TYPE_TEREDO: + case RTE_ETH_TUNNEL_TYPE_GENEVE: + case RTE_ETH_TUNNEL_TYPE_TEREDO: PMD_DRV_LOG(ERR, "Tunnel type is not supported now."); ret = -1; break; @@ -9035,7 +8759,7 @@ i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, } /* Calculate the maximum number of contiguous PF queues that are configured */ -static int +int 
i40e_pf_calc_configured_queues_num(struct i40e_pf *pf) { struct rte_eth_dev_data *data = pf->dev_data; @@ -9054,101 +8778,161 @@ i40e_pf_calc_configured_queues_num(struct i40e_pf *pf) return num; } -/* Configure RSS */ -static int -i40e_pf_config_rss(struct i40e_pf *pf) +/* Reset the global configure of hash function and input sets */ +static void +i40e_pf_global_rss_reset(struct i40e_pf *pf) { - enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode; struct i40e_hw *hw = I40E_PF_TO_HW(pf); - struct rte_eth_rss_conf rss_conf; - uint32_t i, lut = 0; - uint16_t j, num; + uint32_t reg, reg_val; + int i; - /* - * If both VMDQ and RSS enabled, not all of PF queues are configured. - * It's necessary to calculate the actual PF queues that are configured. + /* Reset global RSS function sets */ + reg_val = i40e_read_rx_ctl(hw, I40E_GLQF_CTL); + if (!(reg_val & I40E_GLQF_CTL_HTOEP_MASK)) { + reg_val |= I40E_GLQF_CTL_HTOEP_MASK; + i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg_val); + } + + for (i = 0; i <= I40E_FILTER_PCTYPE_L2_PAYLOAD; i++) { + uint64_t inset; + int j, pctype; + + if (hw->mac.type == I40E_MAC_X722) + pctype = i40e_read_rx_ctl(hw, I40E_GLQF_FD_PCTYPES(i)); + else + pctype = i; + + /* Reset pctype insets */ + inset = i40e_get_default_input_set(i); + if (inset) { + pf->hash_input_set[pctype] = inset; + inset = i40e_translate_input_set_reg(hw->mac.type, + inset); + + reg = I40E_GLQF_HASH_INSET(0, pctype); + i40e_check_write_global_reg(hw, reg, (uint32_t)inset); + reg = I40E_GLQF_HASH_INSET(1, pctype); + i40e_check_write_global_reg(hw, reg, + (uint32_t)(inset >> 32)); + + /* Clear unused mask registers of the pctype */ + for (j = 0; j < I40E_INSET_MASK_NUM_REG; j++) { + reg = I40E_GLQF_HASH_MSK(j, pctype); + i40e_check_write_global_reg(hw, reg, 0); + } + } + + /* Reset pctype symmetric sets */ + reg = I40E_GLQF_HSYM(pctype); + reg_val = i40e_read_rx_ctl(hw, reg); + if (reg_val & I40E_GLQF_HSYM_SYMH_ENA_MASK) { + reg_val &= ~I40E_GLQF_HSYM_SYMH_ENA_MASK; + i40e_write_global_rx_ctl(hw, reg, reg_val); + } + } + I40E_WRITE_FLUSH(hw); +} + +int +i40e_pf_reset_rss_reta(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->adapter->hw; + uint8_t lut[RTE_ETH_RSS_RETA_SIZE_512]; + uint32_t i; + int num; + + /* If both VMDQ and RSS enabled, not all of PF queues are + * configured. It's necessary to calculate the actual PF + * queues that are configured. 
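
i40e_pf_reset_rss_reta(), continued below, fills the redirection table round-robin so that entry i steers to queue i % nb_queues. A self-contained illustration of the fill pattern:

#include <stdint.h>
#include <stdio.h>

static void
reta_fill(uint8_t *lut, uint32_t lut_size, uint32_t nb_queues)
{
        uint32_t i;

        for (i = 0; i < lut_size; i++)
                lut[i] = (uint8_t)(i % nb_queues);
}

int main(void)
{
        uint8_t lut[128];

        reta_fill(lut, 128, 6);
        printf("%u %u %u\n", lut[0], lut[5], lut[6]);   /* prints: 0 5 0 */
        return 0;
}
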
*/ - if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) + if (pf->dev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) num = i40e_pf_calc_configured_queues_num(pf); else num = pf->dev_data->nb_rx_queues; num = RTE_MIN(num, I40E_MAX_Q_PER_TC); - PMD_INIT_LOG(INFO, "Max of contiguous %u PF queues are configured", - num); + if (num <= 0) + return 0; - if (num == 0) { - PMD_INIT_LOG(ERR, - "No PF queues are configured to enable RSS for port %u", - pf->dev_data->port_id); - return -ENOTSUP; - } + for (i = 0; i < hw->func_caps.rss_table_size; i++) + lut[i] = (uint8_t)(i % (uint32_t)num); - if (pf->adapter->rss_reta_updated == 0) { - for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) { - if (j == num) - j = 0; - lut = (lut << 8) | (j & ((0x1 << - hw->func_caps.rss_table_entry_width) - 1)); - if ((i & 3) == 3) - I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), - rte_bswap32(lut)); - } - } + return i40e_set_rss_lut(pf->main_vsi, lut, (uint16_t)i); +} - rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf; - if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0 || - !(mq_mode & ETH_MQ_RX_RSS_FLAG)) { - i40e_pf_disable_rss(pf); - return 0; - } - if (rss_conf.rss_key == NULL || rss_conf.rss_key_len < - (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) { - /* Random default keys */ +int +i40e_pf_reset_rss_key(struct i40e_pf *pf) +{ + const uint8_t key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) * + sizeof(uint32_t); + uint8_t *rss_key; + + /* Reset key */ + rss_key = pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_key; + if (!rss_key || + pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_key_len < key_len) { static uint32_t rss_key_default[] = {0x6b793944, 0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8, 0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605, 0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581}; - rss_conf.rss_key = (uint8_t *)rss_key_default; - rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) * - sizeof(uint32_t); + rss_key = (uint8_t *)rss_key_default; } - return i40e_hw_rss_hash_set(pf, &rss_conf); + return i40e_set_rss_key(pf->main_vsi, rss_key, key_len); } static int -i40e_tunnel_filter_param_check(struct i40e_pf *pf, - struct rte_eth_tunnel_filter_conf *filter) +i40e_pf_rss_reset(struct i40e_pf *pf) { - if (pf == NULL || filter == NULL) { - PMD_DRV_LOG(ERR, "Invalid parameter"); - return -EINVAL; - } + struct i40e_hw *hw = I40E_PF_TO_HW(pf); - if (filter->queue_id >= pf->dev_data->nb_rx_queues) { - PMD_DRV_LOG(ERR, "Invalid queue ID"); - return -EINVAL; - } + int ret; - if (filter->inner_vlan > RTE_ETHER_MAX_VLAN_ID) { - PMD_DRV_LOG(ERR, "Invalid inner VLAN ID"); - return -EINVAL; - } + pf->hash_filter_enabled = 0; + i40e_pf_disable_rss(pf); + i40e_set_symmetric_hash_enable_per_port(hw, 0); - if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) && - (rte_is_zero_ether_addr(&filter->outer_mac))) { - PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address"); - return -EINVAL; + if (!pf->support_multi_driver) + i40e_pf_global_rss_reset(pf); + + /* Reset RETA table */ + if (pf->adapter->rss_reta_updated == 0) { + ret = i40e_pf_reset_rss_reta(pf); + if (ret) + return ret; } - if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) && - (rte_is_zero_ether_addr(&filter->inner_mac))) { - PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address"); - return -EINVAL; + return i40e_pf_reset_rss_key(pf); +} + +/* Configure RSS */ +int +i40e_pf_config_rss(struct i40e_pf *pf) +{ + struct i40e_hw *hw; + enum rte_eth_rx_mq_mode mq_mode; + uint64_t rss_hf, hena; + int ret; + + ret = 
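
The tail of the new i40e_pf_config_rss(), just below, enables the requested hash types by writing a 64-bit HENA mask through a pair of 32-bit registers. Distilled sketch; the bank indices stand in for I40E_PFQF_HENA(0) and I40E_PFQF_HENA(1):

#include <stdint.h>

static void
hena_write(void (*wr32)(int bank, uint32_t val), uint64_t hena)
{
        wr32(0, (uint32_t)hena);           /* PFQF_HENA(0): low 32 bits  */
        wr32(1, (uint32_t)(hena >> 32));   /* PFQF_HENA(1): high 32 bits */
}
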
i40e_pf_rss_reset(pf); + if (ret) { + PMD_DRV_LOG(ERR, "Reset RSS failed, RSS has been disabled"); + return ret; } + rss_hf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_hf; + mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode; + if (!(rss_hf & pf->adapter->flow_types_mask) || + !(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) + return 0; + + hw = I40E_PF_TO_HW(pf); + hena = i40e_config_hena(pf->adapter, rss_hf); + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena); + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32)); + I40E_WRITE_FLUSH(hw); + return 0; } @@ -9196,287 +8980,35 @@ i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len) return ret; } -static int -i40e_dev_global_config_set(struct i40e_hw *hw, struct rte_eth_global_cfg *cfg) -{ - int ret = -EINVAL; - - if (!hw || !cfg) - return -EINVAL; - - switch (cfg->cfg_type) { - case RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN: - ret = i40e_dev_set_gre_key_len(hw, cfg->cfg.gre_key_len); - break; - default: - PMD_DRV_LOG(ERR, "Unknown config type %u", cfg->cfg_type); - break; - } - - return ret; -} - -static int -i40e_filter_ctrl_global_config(struct rte_eth_dev *dev, - enum rte_filter_op filter_op, - void *arg) -{ - struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - int ret = I40E_ERR_PARAM; - - switch (filter_op) { - case RTE_ETH_FILTER_SET: - ret = i40e_dev_global_config_set(hw, - (struct rte_eth_global_cfg *)arg); - break; - default: - PMD_DRV_LOG(ERR, "unknown operation %u", filter_op); - break; - } - - return ret; -} - -static int -i40e_tunnel_filter_handle(struct rte_eth_dev *dev, - enum rte_filter_op filter_op, - void *arg) -{ - struct rte_eth_tunnel_filter_conf *filter; - struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); - int ret = I40E_SUCCESS; - - filter = (struct rte_eth_tunnel_filter_conf *)(arg); - - if (i40e_tunnel_filter_param_check(pf, filter) < 0) - return I40E_ERR_PARAM; - - switch (filter_op) { - case RTE_ETH_FILTER_NOP: - if (!(pf->flags & I40E_FLAG_VXLAN)) - ret = I40E_NOT_SUPPORTED; - break; - case RTE_ETH_FILTER_ADD: - ret = i40e_dev_tunnel_filter_set(pf, filter, 1); - break; - case RTE_ETH_FILTER_DELETE: - ret = i40e_dev_tunnel_filter_set(pf, filter, 0); - break; - default: - PMD_DRV_LOG(ERR, "unknown operation %u", filter_op); - ret = I40E_ERR_PARAM; - break; - } - - return ret; -} - -/* Get the symmetric hash enable configurations per port */ -static void -i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable) -{ - uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0); - - *enable = reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK ? 1 : 0; -} - /* Set the symmetric hash enable configurations per port */ -static void +void i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable) { uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0); if (enable > 0) { - if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) { - PMD_DRV_LOG(INFO, - "Symmetric hash has already been enabled"); + if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) return; - } + reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK; } else { - if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) { - PMD_DRV_LOG(INFO, - "Symmetric hash has already been disabled"); + if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) return; - } + reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK; } i40e_write_rx_ctl(hw, I40E_PRTQF_CTL_0, reg); I40E_WRITE_FLUSH(hw); } -/* - * Get global configurations of hash function type and symmetric hash enable - * per flow type (pctype). Note that global configuration means it affects all - * the ports on the same NIC. 
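
The slimmed-down i40e_set_symmetric_hash_enable_per_port() above is an idempotent read-modify-write: it returns early when the HSYM bit already matches the request, so the register write and flush happen only on a real transition. Generic sketch of the pattern, operating on a plain word to stay standalone:

#include <stdint.h>

static void
rmw_bit(uint32_t *reg, uint32_t mask, int enable)
{
        uint32_t v = *reg;

        if (enable) {
                if (v & mask)
                        return;            /* already set: skip the write */
                v |= mask;
        } else {
                if (!(v & mask))
                        return;            /* already clear */
                v &= ~mask;
        }
        *reg = v;
}
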
+/** + * Valid input sets for hash and flow director filters per PCTYPE */ -static int -i40e_get_hash_filter_global_config(struct i40e_hw *hw, - struct rte_eth_hash_global_conf *g_cfg) +static uint64_t +i40e_get_valid_input_set(enum i40e_filter_pctype pctype, + enum rte_filter_type filter) { - struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back; - uint32_t reg; - uint16_t i, j; - - memset(g_cfg, 0, sizeof(*g_cfg)); - reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL); - if (reg & I40E_GLQF_CTL_HTOEP_MASK) - g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ; - else - g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR; - PMD_DRV_LOG(DEBUG, "Hash function is %s", - (reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR"); - - /* - * As i40e supports less than 64 flow types, only first 64 bits need to - * be checked. - */ - for (i = 1; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) { - g_cfg->valid_bit_mask[i] = 0ULL; - g_cfg->sym_hash_enable_mask[i] = 0ULL; - } - - g_cfg->valid_bit_mask[0] = adapter->flow_types_mask; - - for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) { - if (!adapter->pctypes_tbl[i]) - continue; - for (j = I40E_FILTER_PCTYPE_INVALID + 1; - j < I40E_FILTER_PCTYPE_MAX; j++) { - if (adapter->pctypes_tbl[i] & (1ULL << j)) { - reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(j)); - if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK) { - g_cfg->sym_hash_enable_mask[0] |= - (1ULL << i); - } - } - } - } - - return 0; -} - -static int -i40e_hash_global_config_check(const struct i40e_adapter *adapter, - const struct rte_eth_hash_global_conf *g_cfg) -{ - uint32_t i; - uint64_t mask0, i40e_mask = adapter->flow_types_mask; - - if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ && - g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR && - g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_DEFAULT) { - PMD_DRV_LOG(ERR, "Unsupported hash function type %d", - g_cfg->hash_func); - return -EINVAL; - } - - /* - * As i40e supports less than 64 flow types, only first 64 bits need to - * be checked. - */ - mask0 = g_cfg->valid_bit_mask[0]; - for (i = 0; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) { - if (i == 0) { - /* Check if any unsupported flow type configured */ - if ((mask0 | i40e_mask) ^ i40e_mask) - goto mask_err; - } else { - if (g_cfg->valid_bit_mask[i]) - goto mask_err; - } - } - - return 0; - -mask_err: - PMD_DRV_LOG(ERR, "i40e unsupported flow type bit(s) configured"); - - return -EINVAL; -} - -/* - * Set global configurations of hash function type and symmetric hash enable - * per flow type (pctype). Note any modifying global configuration will affect - * all the ports on the same NIC. - */ -static int -i40e_set_hash_filter_global_config(struct i40e_hw *hw, - struct rte_eth_hash_global_conf *g_cfg) -{ - struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back; - struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf; - int ret; - uint16_t i, j; - uint32_t reg; - uint64_t mask0 = g_cfg->valid_bit_mask[0] & adapter->flow_types_mask; - - if (pf->support_multi_driver) { - PMD_DRV_LOG(ERR, "Hash global configuration is not supported."); - return -ENOTSUP; - } - - /* Check the input parameters */ - ret = i40e_hash_global_config_check(adapter, g_cfg); - if (ret < 0) - return ret; - - /* - * As i40e supports less than 64 flow types, only first 64 bits need to - * be configured. - */ - for (i = RTE_ETH_FLOW_UNKNOWN + 1; mask0 && i < UINT64_BIT; i++) { - if (mask0 & (1UL << i)) { - reg = (g_cfg->sym_hash_enable_mask[0] & (1ULL << i)) ? 
- I40E_GLQF_HSYM_SYMH_ENA_MASK : 0; - - for (j = I40E_FILTER_PCTYPE_INVALID + 1; - j < I40E_FILTER_PCTYPE_MAX; j++) { - if (adapter->pctypes_tbl[i] & (1ULL << j)) - i40e_write_global_rx_ctl(hw, - I40E_GLQF_HSYM(j), - reg); - } - } - } - - reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL); - if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) { - /* Toeplitz */ - if (reg & I40E_GLQF_CTL_HTOEP_MASK) { - PMD_DRV_LOG(DEBUG, - "Hash function already set to Toeplitz"); - goto out; - } - reg |= I40E_GLQF_CTL_HTOEP_MASK; - } else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) { - /* Simple XOR */ - if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) { - PMD_DRV_LOG(DEBUG, - "Hash function already set to Simple XOR"); - goto out; - } - reg &= ~I40E_GLQF_CTL_HTOEP_MASK; - } else - /* Use the default, and keep it as it is */ - goto out; - - i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg); - -out: - I40E_WRITE_FLUSH(hw); - - return 0; -} - -/** - * Valid input sets for hash and flow director filters per PCTYPE - */ -static uint64_t -i40e_get_valid_input_set(enum i40e_filter_pctype pctype, - enum rte_filter_type filter) -{ - uint64_t valid; + uint64_t valid; static const uint64_t valid_hash_inset_table[] = { [I40E_FILTER_PCTYPE_FRAG_IPV4] = @@ -9807,103 +9339,6 @@ i40e_get_default_input_set(uint16_t pctype) return default_inset_table[pctype]; } -/** - * Parse the input set from index to logical bit masks - */ -static int -i40e_parse_input_set(uint64_t *inset, - enum i40e_filter_pctype pctype, - enum rte_eth_input_set_field *field, - uint16_t size) -{ - uint16_t i, j; - int ret = -EINVAL; - - static const struct { - enum rte_eth_input_set_field field; - uint64_t inset; - } inset_convert_table[] = { - {RTE_ETH_INPUT_SET_NONE, I40E_INSET_NONE}, - {RTE_ETH_INPUT_SET_L2_SRC_MAC, I40E_INSET_SMAC}, - {RTE_ETH_INPUT_SET_L2_DST_MAC, I40E_INSET_DMAC}, - {RTE_ETH_INPUT_SET_L2_OUTER_VLAN, I40E_INSET_VLAN_OUTER}, - {RTE_ETH_INPUT_SET_L2_INNER_VLAN, I40E_INSET_VLAN_INNER}, - {RTE_ETH_INPUT_SET_L2_ETHERTYPE, I40E_INSET_LAST_ETHER_TYPE}, - {RTE_ETH_INPUT_SET_L3_SRC_IP4, I40E_INSET_IPV4_SRC}, - {RTE_ETH_INPUT_SET_L3_DST_IP4, I40E_INSET_IPV4_DST}, - {RTE_ETH_INPUT_SET_L3_IP4_TOS, I40E_INSET_IPV4_TOS}, - {RTE_ETH_INPUT_SET_L3_IP4_PROTO, I40E_INSET_IPV4_PROTO}, - {RTE_ETH_INPUT_SET_L3_IP4_TTL, I40E_INSET_IPV4_TTL}, - {RTE_ETH_INPUT_SET_L3_SRC_IP6, I40E_INSET_IPV6_SRC}, - {RTE_ETH_INPUT_SET_L3_DST_IP6, I40E_INSET_IPV6_DST}, - {RTE_ETH_INPUT_SET_L3_IP6_TC, I40E_INSET_IPV6_TC}, - {RTE_ETH_INPUT_SET_L3_IP6_NEXT_HEADER, - I40E_INSET_IPV6_NEXT_HDR}, - {RTE_ETH_INPUT_SET_L3_IP6_HOP_LIMITS, - I40E_INSET_IPV6_HOP_LIMIT}, - {RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT, I40E_INSET_SRC_PORT}, - {RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT, I40E_INSET_SRC_PORT}, - {RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT, I40E_INSET_SRC_PORT}, - {RTE_ETH_INPUT_SET_L4_UDP_DST_PORT, I40E_INSET_DST_PORT}, - {RTE_ETH_INPUT_SET_L4_TCP_DST_PORT, I40E_INSET_DST_PORT}, - {RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT, I40E_INSET_DST_PORT}, - {RTE_ETH_INPUT_SET_L4_SCTP_VERIFICATION_TAG, - I40E_INSET_SCTP_VT}, - {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_DST_MAC, - I40E_INSET_TUNNEL_DMAC}, - {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_VLAN, - I40E_INSET_VLAN_TUNNEL}, - {RTE_ETH_INPUT_SET_TUNNEL_L4_UDP_KEY, - I40E_INSET_TUNNEL_ID}, - {RTE_ETH_INPUT_SET_TUNNEL_GRE_KEY, I40E_INSET_TUNNEL_ID}, - {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_1ST_WORD, - I40E_INSET_FLEX_PAYLOAD_W1}, - {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_2ND_WORD, - I40E_INSET_FLEX_PAYLOAD_W2}, - {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_3RD_WORD, - I40E_INSET_FLEX_PAYLOAD_W3}, - 
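
The deleted global-config path above selected the hash algorithm with a single GLQF_CTL bit: HTOEP set means Toeplitz, clear means simple XOR; the reset path retained elsewhere forces it back to Toeplitz. Distilled sketch, with the bit position illustrative:

#include <stdint.h>

#define HTOEP_MASK (1u << 1)    /* illustrative position of the HTOEP bit */

static uint32_t
set_hash_func(uint32_t glqf_ctl, int use_toeplitz)
{
        return use_toeplitz ? (glqf_ctl | HTOEP_MASK)
                            : (glqf_ctl & ~HTOEP_MASK);
}
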
{RTE_ETH_INPUT_SET_FLEX_PAYLOAD_4TH_WORD, - I40E_INSET_FLEX_PAYLOAD_W4}, - {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_5TH_WORD, - I40E_INSET_FLEX_PAYLOAD_W5}, - {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_6TH_WORD, - I40E_INSET_FLEX_PAYLOAD_W6}, - {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_7TH_WORD, - I40E_INSET_FLEX_PAYLOAD_W7}, - {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_8TH_WORD, - I40E_INSET_FLEX_PAYLOAD_W8}, - }; - - if (!inset || !field || size > RTE_ETH_INSET_SIZE_MAX) - return ret; - - /* Only one item allowed for default or all */ - if (size == 1) { - if (field[0] == RTE_ETH_INPUT_SET_DEFAULT) { - *inset = i40e_get_default_input_set(pctype); - return 0; - } else if (field[0] == RTE_ETH_INPUT_SET_NONE) { - *inset = I40E_INSET_NONE; - return 0; - } - } - - for (i = 0, *inset = 0; i < size; i++) { - for (j = 0; j < RTE_DIM(inset_convert_table); j++) { - if (field[i] == inset_convert_table[j].field) { - *inset |= inset_convert_table[j].inset; - break; - } - } - - /* It contains unsupported input set, return immediately */ - if (j == RTE_DIM(inset_convert_table)) - return ret; - } - - return 0; -} - /** * Translate the input set from bit masks to register aware bit masks * and vice versa @@ -9993,51 +9428,118 @@ i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input) return val; } +static int +i40e_get_inset_field_offset(struct i40e_hw *hw, uint32_t pit_reg_start, + uint32_t pit_reg_count, uint32_t hdr_off) +{ + const uint32_t pit_reg_end = pit_reg_start + pit_reg_count; + uint32_t field_off = I40E_FDIR_FIELD_OFFSET(hdr_off); + uint32_t i, reg_val, src_off, count; + + for (i = pit_reg_start; i < pit_reg_end; i++) { + reg_val = i40e_read_rx_ctl(hw, I40E_GLQF_PIT(i)); + + src_off = I40E_GLQF_PIT_SOURCE_OFF_GET(reg_val); + count = I40E_GLQF_PIT_FSIZE_GET(reg_val); + + if (src_off <= field_off && (src_off + count) > field_off) + break; + } + + if (i >= pit_reg_end) { + PMD_DRV_LOG(ERR, + "Hardware GLQF_PIT configuration does not support this field mask"); + return -1; + } + + return I40E_GLQF_PIT_DEST_OFF_GET(reg_val) + field_off - src_off; +} + int -i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem) +i40e_generate_inset_mask_reg(struct i40e_hw *hw, uint64_t inset, + uint32_t *mask, uint8_t nb_elem) { - uint8_t i, idx = 0; - uint64_t inset_need_mask = inset; + static const uint64_t mask_inset[] = { + I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL, + I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT }; static const struct { uint64_t inset; uint32_t mask; - } inset_mask_map[] = { - {I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK}, - {I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL, 0}, - {I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK}, - {I40E_INSET_IPV4_TTL, I40E_INSET_IPv4_TTL_MASK}, - {I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK}, - {I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT, 0}, - {I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK}, - {I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK}, + uint32_t offset; + } inset_mask_offset_map[] = { + { I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK, + offsetof(struct rte_ipv4_hdr, type_of_service) }, + + { I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK, + offsetof(struct rte_ipv4_hdr, next_proto_id) }, + + { I40E_INSET_IPV4_TTL, I40E_INSET_IPV4_TTL_MASK, + offsetof(struct rte_ipv4_hdr, time_to_live) }, + + { I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK, + offsetof(struct rte_ipv6_hdr, vtc_flow) }, + + { I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK, + offsetof(struct rte_ipv6_hdr, proto) }, + + { 
I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK, + offsetof(struct rte_ipv6_hdr, hop_limits) }, }; - if (!inset || !mask || !nb_elem) + uint32_t i; + int idx = 0; + + assert(mask); + if (!inset) return 0; - for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) { + for (i = 0; i < RTE_DIM(mask_inset); i++) { /* Clear the inset bit, if no MASK is required, * for example proto + ttl */ - if ((inset & inset_mask_map[i].inset) == - inset_mask_map[i].inset && inset_mask_map[i].mask == 0) - inset_need_mask &= ~inset_mask_map[i].inset; - if (!inset_need_mask) - return 0; - } - for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) { - if ((inset_need_mask & inset_mask_map[i].inset) == - inset_mask_map[i].inset) { - if (idx >= nb_elem) { - PMD_DRV_LOG(ERR, "exceed maximal number of bitmasks"); - return -EINVAL; - } - mask[idx] = inset_mask_map[i].mask; - idx++; + if ((mask_inset[i] & inset) == mask_inset[i]) { + inset &= ~mask_inset[i]; + if (!inset) + return 0; } } + for (i = 0; i < RTE_DIM(inset_mask_offset_map); i++) { + uint32_t pit_start, pit_count; + int offset; + + if (!(inset_mask_offset_map[i].inset & inset)) + continue; + + if (inset_mask_offset_map[i].inset & + (I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO | + I40E_INSET_IPV4_TTL)) { + pit_start = I40E_GLQF_PIT_IPV4_START; + pit_count = I40E_GLQF_PIT_IPV4_COUNT; + } else { + pit_start = I40E_GLQF_PIT_IPV6_START; + pit_count = I40E_GLQF_PIT_IPV6_COUNT; + } + + offset = i40e_get_inset_field_offset(hw, pit_start, pit_count, + inset_mask_offset_map[i].offset); + + if (offset < 0) + return -EINVAL; + + if (idx >= nb_elem) { + PMD_DRV_LOG(ERR, + "Configuration of inset mask out of range %u", + nb_elem); + return -ERANGE; + } + + mask[idx] = I40E_GLQF_PIT_BUILD((uint32_t)offset, + inset_mask_offset_map[i].mask); + idx++; + } + return idx; } @@ -10057,9 +9559,10 @@ void i40e_check_write_global_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val) { uint32_t reg = i40e_read_rx_ctl(hw, addr); - struct rte_eth_dev *dev; + struct rte_eth_dev_data *dev_data = + ((struct i40e_adapter *)hw->back)->pf.dev_data; + struct rte_eth_dev *dev = &rte_eth_devices[dev_data->port_id]; - dev = ((struct i40e_adapter *)hw->back)->eth_dev; if (reg != val) { i40e_write_rx_ctl(hw, addr, val); PMD_DRV_LOG(WARNING, @@ -10089,7 +9592,7 @@ i40e_filter_input_set_init(struct i40e_pf *pf) input_set = i40e_get_default_input_set(pctype); - num = i40e_generate_inset_mask_reg(input_set, mask_reg, + num = i40e_generate_inset_mask_reg(hw, input_set, mask_reg, I40E_INSET_MASK_NUM_REG); if (num < 0) return; @@ -10144,57 +9647,32 @@ i40e_filter_input_set_init(struct i40e_pf *pf) } int -i40e_hash_filter_inset_select(struct i40e_hw *hw, - struct rte_eth_input_set_conf *conf) +i40e_set_hash_inset(struct i40e_hw *hw, uint64_t input_set, + uint32_t pctype, bool add) { struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf; - enum i40e_filter_pctype pctype; - uint64_t input_set, inset_reg = 0; uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0}; - int ret, i, num; - - if (!conf) { - PMD_DRV_LOG(ERR, "Invalid pointer"); - return -EFAULT; - } - if (conf->op != RTE_ETH_INPUT_SET_SELECT && - conf->op != RTE_ETH_INPUT_SET_ADD) { - PMD_DRV_LOG(ERR, "Unsupported input set operation"); - return -EINVAL; - } + uint64_t inset_reg = 0; + int num, i; if (pf->support_multi_driver) { - PMD_DRV_LOG(ERR, "Hash input set setting is not supported."); - return -ENOTSUP; - } - - pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type); - if (pctype == I40E_FILTER_PCTYPE_INVALID) { - 
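
Each mask register generated above packs a destination word offset together with a 16-bit field mask, exactly as I40E_GLQF_PIT_BUILD does: (off << 16) | mask. A quick self-checking illustration (the word offset 3 is made up; only the packing is the point):

#include <stdint.h>
#include <assert.h>

static uint32_t
pit_build(uint32_t dest_word_off, uint32_t field_mask)
{
        return (dest_word_off << 16) | (field_mask & 0xFFFF);
}

int main(void)
{
        /* e.g. IPv4 TOS: mask 0x0000FF00 at a computed word offset of 3 */
        assert(pit_build(3, 0x0000FF00) == 0x0003FF00);
        return 0;
}
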
PMD_DRV_LOG(ERR, "invalid flow_type input."); - return -EINVAL; - } - - if (hw->mac.type == I40E_MAC_X722) { - /* get translated pctype value in fd pctype register */ - pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(hw, - I40E_GLQF_FD_PCTYPES((int)pctype)); + PMD_DRV_LOG(ERR, + "Modify input set is not permitted when multi-driver enabled."); + return -EPERM; } - ret = i40e_parse_input_set(&input_set, pctype, conf->field, - conf->inset_size); - if (ret) { - PMD_DRV_LOG(ERR, "Failed to parse input set"); - return -EINVAL; - } + /* For X722, get translated pctype in fd pctype register */ + if (hw->mac.type == I40E_MAC_X722) + pctype = i40e_read_rx_ctl(hw, I40E_GLQF_FD_PCTYPES(pctype)); - if (conf->op == RTE_ETH_INPUT_SET_ADD) { + if (add) { /* get inset value in register */ inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype)); inset_reg <<= I40E_32_BIT_WIDTH; inset_reg |= i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype)); input_set |= pf->hash_input_set[pctype]; } - num = i40e_generate_inset_mask_reg(input_set, mask_reg, + num = i40e_generate_inset_mask_reg(hw, input_set, mask_reg, I40E_INSET_MASK_NUM_REG); if (num < 0) return -EINVAL; @@ -10220,181 +9698,6 @@ i40e_hash_filter_inset_select(struct i40e_hw *hw, return 0; } -int -i40e_fdir_filter_inset_select(struct i40e_pf *pf, - struct rte_eth_input_set_conf *conf) -{ - struct i40e_hw *hw = I40E_PF_TO_HW(pf); - enum i40e_filter_pctype pctype; - uint64_t input_set, inset_reg = 0; - uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0}; - int ret, i, num; - - if (!hw || !conf) { - PMD_DRV_LOG(ERR, "Invalid pointer"); - return -EFAULT; - } - if (conf->op != RTE_ETH_INPUT_SET_SELECT && - conf->op != RTE_ETH_INPUT_SET_ADD) { - PMD_DRV_LOG(ERR, "Unsupported input set operation"); - return -EINVAL; - } - - pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type); - - if (pctype == I40E_FILTER_PCTYPE_INVALID) { - PMD_DRV_LOG(ERR, "invalid flow_type input."); - return -EINVAL; - } - - ret = i40e_parse_input_set(&input_set, pctype, conf->field, - conf->inset_size); - if (ret) { - PMD_DRV_LOG(ERR, "Failed to parse input set"); - return -EINVAL; - } - - /* get inset value in register */ - inset_reg = i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1)); - inset_reg <<= I40E_32_BIT_WIDTH; - inset_reg |= i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0)); - - /* Can not change the inset reg for flex payload for fdir, - * it is done by writing I40E_PRTQF_FD_FLXINSET - * in i40e_set_flex_mask_on_pctype. 
- */ - if (conf->op == RTE_ETH_INPUT_SET_SELECT) - inset_reg &= I40E_REG_INSET_FLEX_PAYLOAD_WORDS; - else - input_set |= pf->fdir.input_set[pctype]; - num = i40e_generate_inset_mask_reg(input_set, mask_reg, - I40E_INSET_MASK_NUM_REG); - if (num < 0) - return -EINVAL; - if (pf->support_multi_driver && num > 0) { - PMD_DRV_LOG(ERR, "FDIR bit mask is not supported."); - return -ENOTSUP; - } - - inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set); - - i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0), - (uint32_t)(inset_reg & UINT32_MAX)); - i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1), - (uint32_t)((inset_reg >> - I40E_32_BIT_WIDTH) & UINT32_MAX)); - - if (!pf->support_multi_driver) { - for (i = 0; i < num; i++) - i40e_check_write_global_reg(hw, - I40E_GLQF_FD_MSK(i, pctype), - mask_reg[i]); - /*clear unused mask registers of the pctype */ - for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) - i40e_check_write_global_reg(hw, - I40E_GLQF_FD_MSK(i, pctype), - 0); - } else { - PMD_DRV_LOG(ERR, "FDIR bit mask is not supported."); - } - I40E_WRITE_FLUSH(hw); - - pf->fdir.input_set[pctype] = input_set; - return 0; -} - -static int -i40e_hash_filter_get(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info) -{ - int ret = 0; - - if (!hw || !info) { - PMD_DRV_LOG(ERR, "Invalid pointer"); - return -EFAULT; - } - - switch (info->info_type) { - case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT: - i40e_get_symmetric_hash_enable_per_port(hw, - &(info->info.enable)); - break; - case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG: - ret = i40e_get_hash_filter_global_config(hw, - &(info->info.global_conf)); - break; - default: - PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported", - info->info_type); - ret = -EINVAL; - break; - } - - return ret; -} - -static int -i40e_hash_filter_set(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info) -{ - int ret = 0; - - if (!hw || !info) { - PMD_DRV_LOG(ERR, "Invalid pointer"); - return -EFAULT; - } - - switch (info->info_type) { - case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT: - i40e_set_symmetric_hash_enable_per_port(hw, info->info.enable); - break; - case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG: - ret = i40e_set_hash_filter_global_config(hw, - &(info->info.global_conf)); - break; - case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT: - ret = i40e_hash_filter_inset_select(hw, - &(info->info.input_set_conf)); - break; - - default: - PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported", - info->info_type); - ret = -EINVAL; - break; - } - - return ret; -} - -/* Operations for hash function */ -static int -i40e_hash_filter_ctrl(struct rte_eth_dev *dev, - enum rte_filter_op filter_op, - void *arg) -{ - struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - int ret = 0; - - switch (filter_op) { - case RTE_ETH_FILTER_NOP: - break; - case RTE_ETH_FILTER_GET: - ret = i40e_hash_filter_get(hw, - (struct rte_eth_hash_filter_info *)arg); - break; - case RTE_ETH_FILTER_SET: - ret = i40e_hash_filter_set(hw, - (struct rte_eth_hash_filter_info *)arg); - break; - default: - PMD_DRV_LOG(WARNING, "Filter operation (%d) not supported", - filter_op); - ret = -ENOTSUP; - break; - } - - return ret; -} - /* Convert ethertype filter structure */ static int i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input, @@ -10409,7 +9712,7 @@ i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input, return 0; } -/* Check if there exists the ehtertype filter */ +/* Check if there exists the ethertype filter */ struct 
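
The software ethertype table looked up below keys on the (MAC address, ethertype) pair; the driver itself uses rte_hash for the lookup, but conceptually it is the following, shown as a dependency-free linear scan (the key struct has no padding, so memcmp over it is safe):

#include <stdint.h>
#include <string.h>

struct ethertype_key {
        uint8_t  mac[6];
        uint16_t ether_type;
};

static int
ethertype_lookup(const struct ethertype_key *tbl, int n,
                 const struct ethertype_key *key)
{
        int i;

        for (i = 0; i < n; i++)
                if (memcmp(&tbl[i], key, sizeof(*key)) == 0)
                        return i;       /* filter already installed */
        return -1;
}
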
i40e_ethertype_filter * i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule, const struct i40e_ethertype_filter_input *input) @@ -10560,89 +9863,15 @@ i40e_ethertype_filter_set(struct i40e_pf *pf, return ret; } -/* - * Handle operations for ethertype filter. - */ static int -i40e_ethertype_filter_handle(struct rte_eth_dev *dev, - enum rte_filter_op filter_op, - void *arg) +i40e_dev_flow_ops_get(struct rte_eth_dev *dev, + const struct rte_flow_ops **ops) { - struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); - int ret = 0; - - if (filter_op == RTE_ETH_FILTER_NOP) - return ret; - - if (arg == NULL) { - PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u", - filter_op); - return -EINVAL; - } - - switch (filter_op) { - case RTE_ETH_FILTER_ADD: - ret = i40e_ethertype_filter_set(pf, - (struct rte_eth_ethertype_filter *)arg, - TRUE); - break; - case RTE_ETH_FILTER_DELETE: - ret = i40e_ethertype_filter_set(pf, - (struct rte_eth_ethertype_filter *)arg, - FALSE); - break; - default: - PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op); - ret = -ENOSYS; - break; - } - return ret; -} - -static int -i40e_dev_filter_ctrl(struct rte_eth_dev *dev, - enum rte_filter_type filter_type, - enum rte_filter_op filter_op, - void *arg) -{ - int ret = 0; - if (dev == NULL) return -EINVAL; - switch (filter_type) { - case RTE_ETH_FILTER_NONE: - /* For global configuration */ - ret = i40e_filter_ctrl_global_config(dev, filter_op, arg); - break; - case RTE_ETH_FILTER_HASH: - ret = i40e_hash_filter_ctrl(dev, filter_op, arg); - break; - case RTE_ETH_FILTER_MACVLAN: - ret = i40e_mac_filter_handle(dev, filter_op, arg); - break; - case RTE_ETH_FILTER_ETHERTYPE: - ret = i40e_ethertype_filter_handle(dev, filter_op, arg); - break; - case RTE_ETH_FILTER_TUNNEL: - ret = i40e_tunnel_filter_handle(dev, filter_op, arg); - break; - case RTE_ETH_FILTER_FDIR: - ret = i40e_fdir_ctrl_func(dev, filter_op, arg); - break; - case RTE_ETH_FILTER_GENERIC: - if (filter_op != RTE_ETH_FILTER_GET) - return -EINVAL; - *(const void **)arg = &i40e_flow_ops; - break; - default: - PMD_DRV_LOG(WARNING, "Filter type (%d) not supported", - filter_type); - ret = -EINVAL; - break; - } - - return ret; + *ops = &i40e_flow_ops; + return 0; } /* @@ -10930,369 +10159,52 @@ i40e_configure_registers(struct i40e_hw *hw) } PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of " "0x%"PRIx32, reg_table[i].val, reg_table[i].addr); - } -} - -#define I40E_VSI_TSR_QINQ_CONFIG 0xc030 -#define I40E_VSI_L2TAGSTXVALID(_i) (0x00042800 + ((_i) * 4)) -#define I40E_VSI_L2TAGSTXVALID_QINQ 0xab -static int -i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi) -{ - uint32_t reg; - int ret; - - if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) { - PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum"); - return -EINVAL; - } - - /* Configure for double VLAN RX stripping */ - reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id)); - if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) { - reg |= I40E_VSI_TSR_QINQ_CONFIG; - ret = i40e_aq_debug_write_register(hw, - I40E_VSI_TSR(vsi->vsi_id), - reg, NULL); - if (ret < 0) { - PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]", - vsi->vsi_id); - return I40E_ERR_CONFIG; - } - } - - /* Configure for double VLAN TX insertion */ - reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id)); - if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) { - reg = I40E_VSI_L2TAGSTXVALID_QINQ; - ret = i40e_aq_debug_write_register(hw, - I40E_VSI_L2TAGSTXVALID( - vsi->vsi_id), reg, NULL); - if (ret < 0) { - 
PMD_DRV_LOG(ERR, - "Failed to update VSI_L2TAGSTXVALID[%d]", - vsi->vsi_id); - return I40E_ERR_CONFIG; - } - } - - return 0; -} - -/** - * i40e_aq_add_mirror_rule - * @hw: pointer to the hardware structure - * @seid: VEB seid to add mirror rule to - * @dst_id: destination vsi seid - * @entries: Buffer which contains the entities to be mirrored - * @count: number of entities contained in the buffer - * @rule_id:the rule_id of the rule to be added - * - * Add a mirror rule for a given veb. - * - **/ -static enum i40e_status_code -i40e_aq_add_mirror_rule(struct i40e_hw *hw, - uint16_t seid, uint16_t dst_id, - uint16_t rule_type, uint16_t *entries, - uint16_t count, uint16_t *rule_id) -{ - struct i40e_aq_desc desc; - struct i40e_aqc_add_delete_mirror_rule cmd; - struct i40e_aqc_add_delete_mirror_rule_completion *resp = - (struct i40e_aqc_add_delete_mirror_rule_completion *) - &desc.params.raw; - uint16_t buff_len; - enum i40e_status_code status; - - i40e_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_add_mirror_rule); - memset(&cmd, 0, sizeof(cmd)); - - buff_len = sizeof(uint16_t) * count; - desc.datalen = rte_cpu_to_le_16(buff_len); - if (buff_len > 0) - desc.flags |= rte_cpu_to_le_16( - (uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); - cmd.rule_type = rte_cpu_to_le_16(rule_type << - I40E_AQC_MIRROR_RULE_TYPE_SHIFT); - cmd.num_entries = rte_cpu_to_le_16(count); - cmd.seid = rte_cpu_to_le_16(seid); - cmd.destination = rte_cpu_to_le_16(dst_id); - - rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd)); - status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL); - PMD_DRV_LOG(INFO, - "i40e_aq_add_mirror_rule, aq_status %d, rule_id = %u mirror_rules_used = %u, mirror_rules_free = %u,", - hw->aq.asq_last_status, resp->rule_id, - resp->mirror_rules_used, resp->mirror_rules_free); - *rule_id = rte_le_to_cpu_16(resp->rule_id); - - return status; -} - -/** - * i40e_aq_del_mirror_rule - * @hw: pointer to the hardware structure - * @seid: VEB seid to add mirror rule to - * @entries: Buffer which contains the entities to be mirrored - * @count: number of entities contained in the buffer - * @rule_id:the rule_id of the rule to be delete - * - * Delete a mirror rule for a given veb. - * - **/ -static enum i40e_status_code -i40e_aq_del_mirror_rule(struct i40e_hw *hw, - uint16_t seid, uint16_t rule_type, uint16_t *entries, - uint16_t count, uint16_t rule_id) -{ - struct i40e_aq_desc desc; - struct i40e_aqc_add_delete_mirror_rule cmd; - uint16_t buff_len = 0; - enum i40e_status_code status; - void *buff = NULL; - - i40e_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_delete_mirror_rule); - memset(&cmd, 0, sizeof(cmd)); - if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) { - desc.flags |= rte_cpu_to_le_16((uint16_t)(I40E_AQ_FLAG_BUF | - I40E_AQ_FLAG_RD)); - cmd.num_entries = count; - buff_len = sizeof(uint16_t) * count; - desc.datalen = rte_cpu_to_le_16(buff_len); - buff = (void *)entries; - } else - /* rule id is filled in destination field for deleting mirror rule */ - cmd.destination = rte_cpu_to_le_16(rule_id); - - cmd.rule_type = rte_cpu_to_le_16(rule_type << - I40E_AQC_MIRROR_RULE_TYPE_SHIFT); - cmd.seid = rte_cpu_to_le_16(seid); - - rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd)); - status = i40e_asq_send_command(hw, &desc, buff, buff_len, NULL); - - return status; -} - -/** - * i40e_mirror_rule_set - * @dev: pointer to the hardware structure - * @mirror_conf: mirror rule info - * @sw_id: mirror rule's sw_id - * @on: enable/disable - * - * set a mirror rule. 
- * - **/ -static int -i40e_mirror_rule_set(struct rte_eth_dev *dev, - struct rte_eth_mirror_conf *mirror_conf, - uint8_t sw_id, uint8_t on) -{ - struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); - struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct i40e_mirror_rule *it, *mirr_rule = NULL; - struct i40e_mirror_rule *parent = NULL; - uint16_t seid, dst_seid, rule_id; - uint16_t i, j = 0; - int ret; - - PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id); - - if (pf->main_vsi->veb == NULL || pf->vfs == NULL) { - PMD_DRV_LOG(ERR, - "mirror rule can not be configured without veb or vfs."); - return -ENOSYS; - } - if (pf->nb_mirror_rule > I40E_MAX_MIRROR_RULES) { - PMD_DRV_LOG(ERR, "mirror table is full."); - return -ENOSPC; - } - if (mirror_conf->dst_pool > pf->vf_num) { - PMD_DRV_LOG(ERR, "invalid destination pool %u.", - mirror_conf->dst_pool); - return -EINVAL; - } - - seid = pf->main_vsi->veb->seid; - - TAILQ_FOREACH(it, &pf->mirror_list, rules) { - if (sw_id <= it->index) { - mirr_rule = it; - break; - } - parent = it; - } - if (mirr_rule && sw_id == mirr_rule->index) { - if (on) { - PMD_DRV_LOG(ERR, "mirror rule exists."); - return -EEXIST; - } else { - ret = i40e_aq_del_mirror_rule(hw, seid, - mirr_rule->rule_type, - mirr_rule->entries, - mirr_rule->num_entries, mirr_rule->id); - if (ret < 0) { - PMD_DRV_LOG(ERR, - "failed to remove mirror rule: ret = %d, aq_err = %d.", - ret, hw->aq.asq_last_status); - return -ENOSYS; - } - TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules); - rte_free(mirr_rule); - pf->nb_mirror_rule--; - return 0; - } - } else if (!on) { - PMD_DRV_LOG(ERR, "mirror rule doesn't exist."); - return -ENOENT; - } - - mirr_rule = rte_zmalloc("i40e_mirror_rule", - sizeof(struct i40e_mirror_rule) , 0); - if (!mirr_rule) { - PMD_DRV_LOG(ERR, "failed to allocate memory"); - return I40E_ERR_NO_MEMORY; - } - switch (mirror_conf->rule_type) { - case ETH_MIRROR_VLAN: - for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) { - if (mirror_conf->vlan.vlan_mask & (1ULL << i)) { - mirr_rule->entries[j] = - mirror_conf->vlan.vlan_id[i]; - j++; - } - } - if (j == 0) { - PMD_DRV_LOG(ERR, "vlan is not specified."); - rte_free(mirr_rule); - return -EINVAL; - } - mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN; - break; - case ETH_MIRROR_VIRTUAL_POOL_UP: - case ETH_MIRROR_VIRTUAL_POOL_DOWN: - /* check if the specified pool bit is out of range */ - if (mirror_conf->pool_mask > (uint64_t)(1ULL << (pf->vf_num + 1))) { - PMD_DRV_LOG(ERR, "pool mask is out of range."); - rte_free(mirr_rule); - return -EINVAL; - } - for (i = 0, j = 0; i < pf->vf_num; i++) { - if (mirror_conf->pool_mask & (1ULL << i)) { - mirr_rule->entries[j] = pf->vfs[i].vsi->seid; - j++; - } - } - if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) { - /* add pf vsi to entries */ - mirr_rule->entries[j] = pf->main_vsi_seid; - j++; - } - if (j == 0) { - PMD_DRV_LOG(ERR, "pool is not specified."); - rte_free(mirr_rule); - return -EINVAL; - } - /* egress and ingress in aq commands means from switch but not port */ - mirr_rule->rule_type = - (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ? 
- I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS : - I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS; - break; - case ETH_MIRROR_UPLINK_PORT: - /* egress and ingress in aq commands means from switch but not port*/ - mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS; - break; - case ETH_MIRROR_DOWNLINK_PORT: - mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS; - break; - default: - PMD_DRV_LOG(ERR, "unsupported mirror type %d.", - mirror_conf->rule_type); - rte_free(mirr_rule); - return -EINVAL; - } - - /* If the dst_pool is equal to vf_num, consider it as PF */ - if (mirror_conf->dst_pool == pf->vf_num) - dst_seid = pf->main_vsi_seid; - else - dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid; - - ret = i40e_aq_add_mirror_rule(hw, seid, dst_seid, - mirr_rule->rule_type, mirr_rule->entries, - j, &rule_id); - if (ret < 0) { - PMD_DRV_LOG(ERR, - "failed to add mirror rule: ret = %d, aq_err = %d.", - ret, hw->aq.asq_last_status); - rte_free(mirr_rule); - return -ENOSYS; - } - - mirr_rule->index = sw_id; - mirr_rule->num_entries = j; - mirr_rule->id = rule_id; - mirr_rule->dst_vsi_seid = dst_seid; - - if (parent) - TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules); - else - TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules); - - pf->nb_mirror_rule++; - return 0; -} - -/** - * i40e_mirror_rule_reset - * @dev: pointer to the device - * @sw_id: mirror rule's sw_id - * - * reset a mirror rule. - * - **/ + } +} + +#define I40E_VSI_TSR_QINQ_CONFIG 0xc030 +#define I40E_VSI_L2TAGSTXVALID(_i) (0x00042800 + ((_i) * 4)) +#define I40E_VSI_L2TAGSTXVALID_QINQ 0xab static int -i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id) +i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi) { - struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); - struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct i40e_mirror_rule *it, *mirr_rule = NULL; - uint16_t seid; + uint32_t reg; int ret; - PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_reset: sw_id = %d.", sw_id); - - seid = pf->main_vsi->veb->seid; + if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) { + PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum"); + return -EINVAL; + } - TAILQ_FOREACH(it, &pf->mirror_list, rules) { - if (sw_id == it->index) { - mirr_rule = it; - break; + /* Configure for double VLAN RX stripping */ + reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id)); + if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) { + reg |= I40E_VSI_TSR_QINQ_CONFIG; + ret = i40e_aq_debug_write_register(hw, + I40E_VSI_TSR(vsi->vsi_id), + reg, NULL); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]", + vsi->vsi_id); + return I40E_ERR_CONFIG; } } - if (mirr_rule) { - ret = i40e_aq_del_mirror_rule(hw, seid, - mirr_rule->rule_type, - mirr_rule->entries, - mirr_rule->num_entries, mirr_rule->id); + + /* Configure for double VLAN TX insertion */ + reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id)); + if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) { + reg = I40E_VSI_L2TAGSTXVALID_QINQ; + ret = i40e_aq_debug_write_register(hw, + I40E_VSI_L2TAGSTXVALID( + vsi->vsi_id), reg, NULL); if (ret < 0) { PMD_DRV_LOG(ERR, - "failed to remove mirror rule: status = %d, aq_err = %d.", - ret, hw->aq.asq_last_status); - return -ENOSYS; + "Failed to update VSI_L2TAGSTXVALID[%d]", + vsi->vsi_id); + return I40E_ERR_CONFIG; } - TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules); - rte_free(mirr_rule); - pf->nb_mirror_rule--; - } else { - PMD_DRV_LOG(ERR, "mirror rule doesn't exist."); - return -ENOENT; } + 
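
The mirror-rule bookkeeping removed above kept its rule list sorted by sw_id, remembering the last node visited during traversal so a new rule could be linked in place. Generic <sys/queue.h> sketch of that sorted-insert pattern:

#include <stddef.h>
#include <sys/queue.h>

struct rule {
        TAILQ_ENTRY(rule) next;
        unsigned int index;               /* the sw_id ordering key */
};
TAILQ_HEAD(rule_list, rule);

static void
rule_insert_sorted(struct rule_list *list, struct rule *r)
{
        struct rule *it, *parent = NULL;

        TAILQ_FOREACH(it, list, next) {
                if (r->index <= it->index)
                        break;            /* insert before this node */
                parent = it;
        }
        if (parent != NULL)
                TAILQ_INSERT_AFTER(list, parent, r, next);
        else
                TAILQ_INSERT_HEAD(list, r, next);
}
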
return 0; } @@ -11349,16 +10261,16 @@ i40e_start_timecounters(struct rte_eth_dev *dev) rte_eth_linkstatus_get(dev, &link); switch (link.link_speed) { - case ETH_SPEED_NUM_40G: - case ETH_SPEED_NUM_25G: + case RTE_ETH_SPEED_NUM_40G: + case RTE_ETH_SPEED_NUM_25G: tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF; tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32; break; - case ETH_SPEED_NUM_10G: + case RTE_ETH_SPEED_NUM_10G: tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF; tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32; break; - case ETH_SPEED_NUM_1G: + case RTE_ETH_SPEED_NUM_1G: tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF; tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32; break; @@ -11586,7 +10498,7 @@ i40e_parse_dcb_configure(struct rte_eth_dev *dev, else *tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t); - if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) { + if (dev->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) { dcb_cfg->pfc.willing = 0; dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS; dcb_cfg->pfc.pfcenable = *tc_map; @@ -12094,7 +11006,7 @@ i40e_dev_get_dcb_info(struct rte_eth_dev *dev, uint16_t bsf, tc_mapping; int i, j = 0; - if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG) + if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1); else dcb_info->nb_tcs = 1; @@ -12142,7 +11054,7 @@ i40e_dev_get_dcb_info(struct rte_eth_dev *dev, dcb_info->tc_queue.tc_rxq[j][i].nb_queue; } j++; - } while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL)); + } while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, RTE_ETH_MAX_VMDQ_POOL)); return 0; } @@ -12150,11 +11062,11 @@ static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) { struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); - struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = pci_dev->intr_handle; struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint16_t msix_intr; - msix_intr = intr_handle->intr_vec[queue_id]; + msix_intr = rte_intr_vec_list_index_get(intr_handle, queue_id); if (msix_intr == I40E_MISC_VEC_ID) I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_INTENA_MASK | @@ -12169,7 +11081,7 @@ i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) I40E_PFINT_DYN_CTLN_ITR_INDX_MASK); I40E_WRITE_FLUSH(hw); - rte_intr_ack(&pci_dev->intr_handle); + rte_intr_ack(pci_dev->intr_handle); return 0; } @@ -12178,11 +11090,11 @@ static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) { struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); - struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = pci_dev->intr_handle; struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint16_t msix_intr; - msix_intr = intr_handle->intr_vec[queue_id]; + msix_intr = rte_intr_vec_list_index_get(intr_handle, queue_id); if (msix_intr == I40E_MISC_VEC_ID) I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_ITR_INDX_MASK); @@ -12418,9 +11330,6 @@ static int i40e_get_module_eeprom(struct rte_eth_dev *dev, uint32_t value = 0; uint32_t i; - if (!info || !info->length || !info->data) - return -EINVAL; - if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP) is_sfp = true; @@ -12503,34 +11412,16 @@ static int i40e_set_default_mac_addr(struct rte_eth_dev *dev, } static int -i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +i40e_dev_mtu_set(struct rte_eth_dev *dev, 
uint16_t mtu __rte_unused) { - struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); - struct rte_eth_dev_data *dev_data = pf->dev_data; - uint32_t frame_size = mtu + I40E_ETH_OVERHEAD; - int ret = 0; - - /* check if mtu is within the allowed range */ - if (mtu < RTE_ETHER_MIN_MTU || frame_size > I40E_FRAME_SIZE_MAX) - return -EINVAL; - /* mtu setting is forbidden if port is start */ - if (dev_data->dev_started) { + if (dev->data->dev_started != 0) { PMD_DRV_LOG(ERR, "port %d must be stopped before configuration", - dev_data->port_id); + dev->data->port_id); return -EBUSY; } - if (frame_size > RTE_ETHER_MAX_LEN) - dev_data->dev_conf.rxmode.offloads |= - DEV_RX_OFFLOAD_JUMBO_FRAME; - else - dev_data->dev_conf.rxmode.offloads &= - ~DEV_RX_OFFLOAD_JUMBO_FRAME; - - dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size; - - return ret; + return 0; } /* Restore ethertype filter */ @@ -12621,25 +11512,13 @@ i40e_tunnel_filter_restore(struct i40e_pf *pf) } } -/* Restore RSS filter */ -static inline void -i40e_rss_filter_restore(struct i40e_pf *pf) -{ - struct i40e_rss_conf_list *list = &pf->rss_config_list; - struct i40e_rss_filter *filter; - - TAILQ_FOREACH(filter, list, next) { - i40e_config_rss_filter(pf, &filter->rss_filter_info, TRUE); - } -} - static void i40e_filter_restore(struct i40e_pf *pf) { i40e_ethertype_filter_restore(pf); i40e_tunnel_filter_restore(pf); i40e_fdir_filter_restore(pf); - i40e_rss_filter_restore(pf); + (void)i40e_hash_filter_restore(pf); } bool @@ -13140,7 +12019,7 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf) struct i40e_aqc_replace_cloud_filters_cmd filter_replace; struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf; struct i40e_hw *hw = I40E_PF_TO_HW(pf); - struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev; + struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id]; if (pf->support_multi_driver) { PMD_DRV_LOG(ERR, "Replace cloud filter is not supported."); @@ -13217,566 +12096,46 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf) return ret; } -int -i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out, - const struct rte_flow_action_rss *in) -{ - if (in->key_len > RTE_DIM(out->key) || - in->queue_num > RTE_DIM(out->queue)) - return -EINVAL; - if (!in->key && in->key_len) - return -EINVAL; - out->conf = (struct rte_flow_action_rss){ - .func = in->func, - .level = in->level, - .types = in->types, - .key_len = in->key_len, - .queue_num = in->queue_num, - .queue = memcpy(out->queue, in->queue, - sizeof(*in->queue) * in->queue_num), - }; - if (in->key) - out->conf.key = memcpy(out->key, in->key, in->key_len); - return 0; -} - -/* Write HENA register to enable hash */ -static int -i40e_rss_hash_set(struct i40e_pf *pf, struct i40e_rte_flow_rss_conf *rss_conf) -{ - struct i40e_hw *hw = I40E_PF_TO_HW(pf); - uint8_t *key = (void *)(uintptr_t)rss_conf->conf.key; - uint64_t hena; - int ret; - - ret = i40e_set_rss_key(pf->main_vsi, key, - rss_conf->conf.key_len); - if (ret) - return ret; - - hena = i40e_config_hena(pf->adapter, rss_conf->conf.types); - i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena); - i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32)); - I40E_WRITE_FLUSH(hw); - - return 0; -} - -/* Configure hash input set */ -static int -i40e_rss_conf_hash_inset(struct i40e_pf *pf, uint64_t types) -{ - struct i40e_hw *hw = I40E_PF_TO_HW(pf); - struct rte_eth_input_set_conf conf; - uint64_t mask0; - int ret = 0; - uint32_t j; - int i; - static const struct { - uint64_t type; - enum 
rte_eth_input_set_field field; - } inset_match_table[] = { - {ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_SRC_ONLY, - RTE_ETH_INPUT_SET_L3_SRC_IP4}, - {ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_DST_ONLY, - RTE_ETH_INPUT_SET_L3_DST_IP4}, - {ETH_RSS_FRAG_IPV4 | ETH_RSS_L4_SRC_ONLY, - RTE_ETH_INPUT_SET_UNKNOWN}, - {ETH_RSS_FRAG_IPV4 | ETH_RSS_L4_DST_ONLY, - RTE_ETH_INPUT_SET_UNKNOWN}, - - {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_SRC_ONLY, - RTE_ETH_INPUT_SET_L3_SRC_IP4}, - {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_DST_ONLY, - RTE_ETH_INPUT_SET_L3_DST_IP4}, - {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_SRC_ONLY, - RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT}, - {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_DST_ONLY, - RTE_ETH_INPUT_SET_L4_TCP_DST_PORT}, - - {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_SRC_ONLY, - RTE_ETH_INPUT_SET_L3_SRC_IP4}, - {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_DST_ONLY, - RTE_ETH_INPUT_SET_L3_DST_IP4}, - {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_SRC_ONLY, - RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT}, - {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_DST_ONLY, - RTE_ETH_INPUT_SET_L4_UDP_DST_PORT}, - - {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_SRC_ONLY, - RTE_ETH_INPUT_SET_L3_SRC_IP4}, - {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_DST_ONLY, - RTE_ETH_INPUT_SET_L3_DST_IP4}, - {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_SRC_ONLY, - RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT}, - {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_DST_ONLY, - RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT}, - - {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_SRC_ONLY, - RTE_ETH_INPUT_SET_L3_SRC_IP4}, - {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_DST_ONLY, - RTE_ETH_INPUT_SET_L3_DST_IP4}, - {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L4_SRC_ONLY, - RTE_ETH_INPUT_SET_UNKNOWN}, - {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L4_DST_ONLY, - RTE_ETH_INPUT_SET_UNKNOWN}, - - {ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_SRC_ONLY, - RTE_ETH_INPUT_SET_L3_SRC_IP6}, - {ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_DST_ONLY, - RTE_ETH_INPUT_SET_L3_DST_IP6}, - {ETH_RSS_FRAG_IPV6 | ETH_RSS_L4_SRC_ONLY, - RTE_ETH_INPUT_SET_UNKNOWN}, - {ETH_RSS_FRAG_IPV6 | ETH_RSS_L4_DST_ONLY, - RTE_ETH_INPUT_SET_UNKNOWN}, - - {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_SRC_ONLY, - RTE_ETH_INPUT_SET_L3_SRC_IP6}, - {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_DST_ONLY, - RTE_ETH_INPUT_SET_L3_DST_IP6}, - {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_SRC_ONLY, - RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT}, - {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_DST_ONLY, - RTE_ETH_INPUT_SET_L4_TCP_DST_PORT}, - - {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_SRC_ONLY, - RTE_ETH_INPUT_SET_L3_SRC_IP6}, - {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_DST_ONLY, - RTE_ETH_INPUT_SET_L3_DST_IP6}, - {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_SRC_ONLY, - RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT}, - {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_DST_ONLY, - RTE_ETH_INPUT_SET_L4_UDP_DST_PORT}, - - {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_SRC_ONLY, - RTE_ETH_INPUT_SET_L3_SRC_IP6}, - {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_DST_ONLY, - RTE_ETH_INPUT_SET_L3_DST_IP6}, - {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_SRC_ONLY, - RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT}, - {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_DST_ONLY, - RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT}, - - {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_SRC_ONLY, - RTE_ETH_INPUT_SET_L3_SRC_IP6}, - {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_DST_ONLY, - RTE_ETH_INPUT_SET_L3_DST_IP6}, - {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L4_SRC_ONLY, - RTE_ETH_INPUT_SET_UNKNOWN}, - {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L4_DST_ONLY, - RTE_ETH_INPUT_SET_UNKNOWN}, - }; - - mask0 = types & pf->adapter->flow_types_mask; - conf.op = RTE_ETH_INPUT_SET_SELECT; - 
conf.inset_size = 0; - for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < RTE_ETH_FLOW_MAX; i++) { - if (mask0 & (1ULL << i)) { - conf.flow_type = i; - break; - } - } - - for (j = 0; j < RTE_DIM(inset_match_table); j++) { - if ((types & inset_match_table[j].type) == - inset_match_table[j].type) { - if (inset_match_table[j].field == - RTE_ETH_INPUT_SET_UNKNOWN) - return -EINVAL; - - conf.field[conf.inset_size] = - inset_match_table[j].field; - conf.inset_size++; - } - } - - if (conf.inset_size) { - ret = i40e_hash_filter_inset_select(hw, &conf); - if (ret) - return ret; - } - - return ret; -} - -/* Look up the conflicted rule then mark it as invalid */ static void -i40e_rss_mark_invalid_rule(struct i40e_pf *pf, - struct i40e_rte_flow_rss_conf *conf) -{ - struct i40e_rss_filter *rss_item; - uint64_t rss_inset; - - /* Clear input set bits before comparing the pctype */ - rss_inset = ~(ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY | - ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY); - - /* Look up the conflicted rule then mark it as invalid */ - TAILQ_FOREACH(rss_item, &pf->rss_config_list, next) { - if (!rss_item->rss_filter_info.valid) - continue; - - if (conf->conf.queue_num && - rss_item->rss_filter_info.conf.queue_num) - rss_item->rss_filter_info.valid = false; - - if (conf->conf.types && - (rss_item->rss_filter_info.conf.types & - rss_inset) == - (conf->conf.types & rss_inset)) - rss_item->rss_filter_info.valid = false; - - if (conf->conf.func == - RTE_ETH_HASH_FUNCTION_SIMPLE_XOR && - rss_item->rss_filter_info.conf.func == - RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) - rss_item->rss_filter_info.valid = false; - } -} - -/* Configure RSS hash function */ -static int -i40e_rss_config_hash_function(struct i40e_pf *pf, - struct i40e_rte_flow_rss_conf *conf) -{ - struct i40e_hw *hw = I40E_PF_TO_HW(pf); - uint32_t reg, i; - uint64_t mask0; - uint16_t j; - - if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) { - reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL); - if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) { - PMD_DRV_LOG(DEBUG, "Hash function already set to Simple XOR"); - I40E_WRITE_FLUSH(hw); - i40e_rss_mark_invalid_rule(pf, conf); - - return 0; - } - reg &= ~I40E_GLQF_CTL_HTOEP_MASK; - - i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg); - I40E_WRITE_FLUSH(hw); - i40e_rss_mark_invalid_rule(pf, conf); - } else if (conf->conf.func == - RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) { - mask0 = conf->conf.types & pf->adapter->flow_types_mask; - - i40e_set_symmetric_hash_enable_per_port(hw, 1); - for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) { - if (mask0 & (1UL << i)) - break; - } - - if (i == UINT64_BIT) - return -EINVAL; - - for (j = I40E_FILTER_PCTYPE_INVALID + 1; - j < I40E_FILTER_PCTYPE_MAX; j++) { - if (pf->adapter->pctypes_tbl[i] & (1ULL << j)) - i40e_write_global_rx_ctl(hw, - I40E_GLQF_HSYM(j), - I40E_GLQF_HSYM_SYMH_ENA_MASK); - } - } - - return 0; -} - -/* Enable RSS according to the configuration */ -static int -i40e_rss_enable_hash(struct i40e_pf *pf, - struct i40e_rte_flow_rss_conf *conf) -{ - struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info; - struct i40e_rte_flow_rss_conf rss_conf; - - if (!(conf->conf.types & pf->adapter->flow_types_mask)) - return -ENOTSUP; - - memset(&rss_conf, 0, sizeof(rss_conf)); - rte_memcpy(&rss_conf, conf, sizeof(rss_conf)); - - /* Configure hash input set */ - if (i40e_rss_conf_hash_inset(pf, conf->conf.types)) - return -EINVAL; - - if (rss_conf.conf.key == NULL || rss_conf.conf.key_len < - (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) { - /* Random default keys */ - 
static uint32_t rss_key_default[] = {0x6b793944, - 0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8, - 0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605, - 0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581}; - - rss_conf.conf.key = (uint8_t *)rss_key_default; - rss_conf.conf.key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) * - sizeof(uint32_t); - PMD_DRV_LOG(INFO, - "No valid RSS key config for i40e, using default\n"); - } - - rss_conf.conf.types |= rss_info->conf.types; - i40e_rss_hash_set(pf, &rss_conf); - - if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) - i40e_rss_config_hash_function(pf, conf); - - i40e_rss_mark_invalid_rule(pf, conf); - - return 0; -} - -/* Configure RSS queue region */ -static int -i40e_rss_config_queue_region(struct i40e_pf *pf, - struct i40e_rte_flow_rss_conf *conf) -{ - struct i40e_hw *hw = I40E_PF_TO_HW(pf); - uint32_t lut = 0; - uint16_t j, num; - uint32_t i; - - /* If both VMDQ and RSS enabled, not all of PF queues are configured. - * It's necessary to calculate the actual PF queues that are configured. - */ - if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) - num = i40e_pf_calc_configured_queues_num(pf); - else - num = pf->dev_data->nb_rx_queues; - - num = RTE_MIN(num, conf->conf.queue_num); - PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are configured", - num); - - if (num == 0) { - PMD_DRV_LOG(ERR, - "No PF queues are configured to enable RSS for port %u", - pf->dev_data->port_id); - return -ENOTSUP; - } - - /* Fill in redirection table */ - for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) { - if (j == num) - j = 0; - lut = (lut << 8) | (conf->conf.queue[j] & ((0x1 << - hw->func_caps.rss_table_entry_width) - 1)); - if ((i & 3) == 3) - I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut); - } - - i40e_rss_mark_invalid_rule(pf, conf); - - return 0; -} - -/* Configure RSS hash function to default */ -static int -i40e_rss_clear_hash_function(struct i40e_pf *pf, - struct i40e_rte_flow_rss_conf *conf) +i40e_set_mac_max_frame(struct rte_eth_dev *dev, uint16_t size) { - struct i40e_hw *hw = I40E_PF_TO_HW(pf); - uint32_t i, reg; - uint64_t mask0; - uint16_t j; - - if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) { - reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL); - if (reg & I40E_GLQF_CTL_HTOEP_MASK) { - PMD_DRV_LOG(DEBUG, - "Hash function already set to Toeplitz"); - I40E_WRITE_FLUSH(hw); - - return 0; - } - reg |= I40E_GLQF_CTL_HTOEP_MASK; - - i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg); - I40E_WRITE_FLUSH(hw); - } else if (conf->conf.func == - RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) { - mask0 = conf->conf.types & pf->adapter->flow_types_mask; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t rep_cnt = MAX_REPEAT_TIME; + struct rte_eth_link link; + enum i40e_status_code status; + bool can_be_set = true; - for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) { - if (mask0 & (1UL << i)) + /* I40E_MEDIA_TYPE_BASET link up can be ignored */ + if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET) { + do { + update_link_reg(hw, &link); + if (link.link_status) break; - } - - if (i == UINT64_BIT) - return -EINVAL; - - for (j = I40E_FILTER_PCTYPE_INVALID + 1; - j < I40E_FILTER_PCTYPE_MAX; j++) { - if (pf->adapter->pctypes_tbl[i] & (1ULL << j)) - i40e_write_global_rx_ctl(hw, - I40E_GLQF_HSYM(j), - 0); - } - } - - return 0; -} - -/* Disable RSS hash and configure default input set */ -static int -i40e_rss_disable_hash(struct i40e_pf *pf, - struct i40e_rte_flow_rss_conf *conf) -{ - struct 
i40e_rte_flow_rss_conf *rss_info = &pf->rss_info; - struct i40e_hw *hw = I40E_PF_TO_HW(pf); - struct i40e_rte_flow_rss_conf rss_conf; - uint32_t i; - - memset(&rss_conf, 0, sizeof(rss_conf)); - rte_memcpy(&rss_conf, conf, sizeof(rss_conf)); - - /* Disable RSS hash */ - rss_conf.conf.types = rss_info->conf.types & ~(conf->conf.types); - i40e_rss_hash_set(pf, &rss_conf); - - for (i = RTE_ETH_FLOW_IPV4; i <= RTE_ETH_FLOW_L2_PAYLOAD; i++) { - if (!(pf->adapter->flow_types_mask & (1ULL << i)) || - !(conf->conf.types & (1ULL << i))) - continue; - - /* Configure default input set */ - struct rte_eth_input_set_conf input_conf = { - .op = RTE_ETH_INPUT_SET_SELECT, - .flow_type = i, - .inset_size = 1, - }; - input_conf.field[0] = RTE_ETH_INPUT_SET_DEFAULT; - i40e_hash_filter_inset_select(hw, &input_conf); - } - - rss_info->conf.types = rss_conf.conf.types; - - i40e_rss_clear_hash_function(pf, conf); - - return 0; -} - -/* Configure RSS queue region to default */ -static int -i40e_rss_clear_queue_region(struct i40e_pf *pf) -{ - struct i40e_hw *hw = I40E_PF_TO_HW(pf); - struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info; - uint16_t queue[I40E_MAX_Q_PER_TC]; - uint32_t num_rxq, i; - uint32_t lut = 0; - uint16_t j, num; - - num_rxq = RTE_MIN(pf->dev_data->nb_rx_queues, I40E_MAX_Q_PER_TC); - - for (j = 0; j < num_rxq; j++) - queue[j] = j; - - /* If both VMDQ and RSS enabled, not all of PF queues are configured. - * It's necessary to calculate the actual PF queues that are configured. - */ - if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) - num = i40e_pf_calc_configured_queues_num(pf); - else - num = pf->dev_data->nb_rx_queues; - - num = RTE_MIN(num, num_rxq); - PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are configured", - num); - - if (num == 0) { - PMD_DRV_LOG(ERR, - "No PF queues are configured to enable RSS for port %u", - pf->dev_data->port_id); - return -ENOTSUP; - } - - /* Fill in redirection table */ - for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) { - if (j == num) - j = 0; - lut = (lut << 8) | (queue[j] & ((0x1 << - hw->func_caps.rss_table_entry_width) - 1)); - if ((i & 3) == 3) - I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut); + rte_delay_ms(CHECK_INTERVAL); + } while (--rep_cnt); + can_be_set = !!link.link_status; } - rss_info->conf.queue_num = 0; - memset(&rss_info->conf.queue, 0, sizeof(uint16_t)); - - return 0; -} - -int -i40e_config_rss_filter(struct i40e_pf *pf, - struct i40e_rte_flow_rss_conf *conf, bool add) -{ - struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info; - struct rte_flow_action_rss update_conf = rss_info->conf; - int ret = 0; - - if (add) { - if (conf->conf.queue_num) { - /* Configure RSS queue region */ - ret = i40e_rss_config_queue_region(pf, conf); - if (ret) - return ret; - - update_conf.queue_num = conf->conf.queue_num; - update_conf.queue = conf->conf.queue; - } else if (conf->conf.func == - RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) { - /* Configure hash function */ - ret = i40e_rss_config_hash_function(pf, conf); - if (ret) - return ret; - - update_conf.func = conf->conf.func; - } else { - /* Configure hash enable and input set */ - ret = i40e_rss_enable_hash(pf, conf); - if (ret) - return ret; - - update_conf.types |= conf->conf.types; - update_conf.key = conf->conf.key; - update_conf.key_len = conf->conf.key_len; - } - - /* Update RSS info in pf */ - if (i40e_rss_conf_init(rss_info, &update_conf)) - return -EINVAL; + if (can_be_set) { + status = i40e_aq_set_mac_config(hw, size, TRUE, 0, false, NULL); + if (status != 
I40E_SUCCESS) + PMD_DRV_LOG(ERR, "Failed to set max frame size at port level"); } else { - if (!conf->valid) - return 0; - - if (conf->conf.queue_num) - i40e_rss_clear_queue_region(pf); - else if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) - i40e_rss_clear_hash_function(pf, conf); - else - i40e_rss_disable_hash(pf, conf); + PMD_DRV_LOG(ERR, "Set max frame size at port level not applicable on link down"); } - - return 0; } -RTE_LOG_REGISTER(i40e_logtype_init, pmd.net.i40e.init, NOTICE); -RTE_LOG_REGISTER(i40e_logtype_driver, pmd.net.i40e.driver, NOTICE); -#ifdef RTE_LIBRTE_I40E_DEBUG_RX -RTE_LOG_REGISTER(i40e_logtype_rx, pmd.net.i40e.rx, DEBUG); -#endif -#ifdef RTE_LIBRTE_I40E_DEBUG_TX -RTE_LOG_REGISTER(i40e_logtype_tx, pmd.net.i40e.tx, DEBUG); +RTE_LOG_REGISTER_SUFFIX(i40e_logtype_init, init, NOTICE); +RTE_LOG_REGISTER_SUFFIX(i40e_logtype_driver, driver, NOTICE); +#ifdef RTE_ETHDEV_DEBUG_RX +RTE_LOG_REGISTER_SUFFIX(i40e_logtype_rx, rx, DEBUG); #endif -#ifdef RTE_LIBRTE_I40E_DEBUG_TX_FREE -RTE_LOG_REGISTER(i40e_logtype_tx_free, pmd.net.i40e.tx_free, DEBUG); +#ifdef RTE_ETHDEV_DEBUG_TX +RTE_LOG_REGISTER_SUFFIX(i40e_logtype_tx, tx, DEBUG); #endif RTE_PMD_REGISTER_PARAM_STRING(net_i40e, ETH_I40E_FLOATING_VEB_ARG "=1" ETH_I40E_FLOATING_VEB_LIST_ARG "=" ETH_I40E_QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16" - ETH_I40E_SUPPORT_MULTI_DRIVER "=1" - ETH_I40E_USE_LATEST_VEC "=0|1"); + ETH_I40E_SUPPORT_MULTI_DRIVER "=1");
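
---

Notes on the patterns above (illustrative sketches, not part of the patch):

The i40e_config_qinq() addition applies the same read-check-write discipline to both VSI registers: read the current value, skip the admin-queue write when the QinQ bits are already in place, and treat a failed write as I40E_ERR_CONFIG. Below is a minimal standalone sketch of that pattern with a mocked register array standing in for the hardware; MAX_VSI, reg_read() and reg_write() are illustrative stand-ins, not driver symbols, and the real code goes through I40E_READ_REG and i40e_aq_debug_write_register instead.

#include <stdint.h>
#include <stdio.h>

#define VSI_TSR_QINQ_CONFIG 0xc030	/* QinQ bits, value taken from the hunk above */
#define MAX_VSI 4

static uint32_t tsr_regs[MAX_VSI];	/* mock per-VSI TSR registers */

static uint32_t reg_read(unsigned int vsi) { return tsr_regs[vsi]; }
static int reg_write(unsigned int vsi, uint32_t v) { tsr_regs[vsi] = v; return 0; }

/* Enable double VLAN RX stripping on one VSI, writing the register
 * only when the QinQ bits are not already set. */
static int config_qinq(unsigned int vsi)
{
	uint32_t reg;

	if (vsi >= MAX_VSI)
		return -1;		/* mirrors the driver's VSI ID bound check */

	reg = reg_read(vsi);
	if ((reg & VSI_TSR_QINQ_CONFIG) == VSI_TSR_QINQ_CONFIG)
		return 0;		/* already configured, no write needed */

	reg |= VSI_TSR_QINQ_CONFIG;
	return reg_write(vsi, reg);
}

int main(void)
{
	config_qinq(0);
	printf("VSI 0 TSR = 0x%x\n", reg_read(0));
	return 0;
}

Skipping the write when the bits already match is not just an optimization: each write goes through a slow admin-queue command, so the check keeps repeated configuration calls idempotent and cheap.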
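In the i40e_start_timecounters() hunk only the ETH_SPEED_NUM_* constants are renamed; the underlying technique is unchanged: pick a 64-bit timesync increment for the current link speed and split it into 32-bit low and high register halves. A sketch under the assumption of made-up increment constants (the driver's I40E_PTP_*_INCVAL values are device-specific and not restated here):

#include <stdint.h>
#include <stdio.h>

/* Illustrative increment values only; real ones come from the datasheet. */
#define PTP_40GB_INCVAL 0x0199999999ULL
#define PTP_10GB_INCVAL 0x0333333333ULL
#define PTP_1GB_INCVAL  0x2000000000ULL

/* Map link speed (Mb/s) to an increment and split it for a
 * 32-bit low/high register pair, as the hunk above does. */
static void pick_incval(uint32_t speed_mbps, uint32_t *lo, uint32_t *hi)
{
	uint64_t incval;

	switch (speed_mbps) {
	case 40000:
	case 25000:
		incval = PTP_40GB_INCVAL;	/* 25G shares the 40G value */
		break;
	case 10000:
		incval = PTP_10GB_INCVAL;
		break;
	case 1000:
		incval = PTP_1GB_INCVAL;
		break;
	default:
		incval = 0;			/* unknown speed: leave timesync off */
		break;
	}
	*lo = (uint32_t)(incval & 0xFFFFFFFF);
	*hi = (uint32_t)(incval >> 32);
}

int main(void)
{
	uint32_t lo, hi;

	pick_incval(10000, &lo, &hi);
	printf("incval = 0x%08x%08x\n", hi, lo);
	return 0;
}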
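The removed i40e_rss_config_queue_region()/i40e_rss_clear_queue_region() code (this logic now lives behind i40e_hash.c) filled the RSS redirection table by packing four 8-bit queue indexes into each 32-bit HLUT register, cycling round-robin over the configured queues. A self-contained sketch of that packing; RSS_TABLE_SIZE, ENTRY_WIDTH and lut_regs[] are stand-ins for the hw->func_caps fields and the I40E_PFQF_HLUT registers used by the driver.

#include <stdint.h>
#include <stdio.h>

#define RSS_TABLE_SIZE 128	/* LUT entries (func_caps.rss_table_size stand-in) */
#define ENTRY_WIDTH 8		/* bits per LUT entry */

static uint32_t lut_regs[RSS_TABLE_SIZE / 4];	/* mock HLUT registers */

/* Spread 'num' queues round-robin across the table, emitting one
 * 32-bit register write per four packed entries. */
static void fill_lut(const uint16_t *queue, uint16_t num)
{
	uint32_t lut = 0;
	uint32_t i;
	uint16_t j = 0;

	for (i = 0; i < RSS_TABLE_SIZE; i++, j++) {
		if (j == num)
			j = 0;
		lut = (lut << 8) | (queue[j] & ((1u << ENTRY_WIDTH) - 1));
		if ((i & 3) == 3)
			lut_regs[i >> 2] = lut;
	}
}

int main(void)
{
	uint16_t queues[] = { 0, 1, 2 };

	fill_lut(queues, 3);
	/* queues 0,1,2,0 packed into the first register: 0x00010200 */
	printf("HLUT[0] = 0x%08x\n", lut_regs[0]);
	return 0;
}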
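Finally, the new i40e_set_mac_max_frame() only issues the port-level MAC config once the link has come up, polling with a bounded retry loop and skipping the wait entirely for BASE-T media. A sketch of that wait-then-configure shape, where link_is_up() is a mock that flips to true after a few polls and MAX_REPEAT/CHECK_INTERVAL_US merely approximate the driver's MAX_REPEAT_TIME and CHECK_INTERVAL:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MAX_REPEAT 10
#define CHECK_INTERVAL_US 100000

/* Mock link query; a real driver reads its link status registers. */
static bool link_is_up(void)
{
	static int polls;

	return ++polls >= 3;
}

/* Apply a port-level max frame size only once the link is up. */
static int set_max_frame(uint16_t size)
{
	int rep = MAX_REPEAT;
	bool up = false;

	do {
		up = link_is_up();
		if (up)
			break;
		usleep(CHECK_INTERVAL_US);
	} while (--rep);

	if (!up) {
		fprintf(stderr, "link down, max frame size not applied\n");
		return -1;
	}
	printf("max frame size set to %u\n", size);
	return 0;
}

int main(void)
{
	return set_max_frame(9728);
}

Bounding the loop matters: if the link never comes up, the caller logs an error and returns instead of blocking device start indefinitely, which is exactly the behavior of the hunk above.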