X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_pmd_i40e%2Fi40e_ethdev.c;h=bb3223b75746adc01db80d72aa676778140127de;hb=8bae1da2afe0;hp=4ebb981654b244ac8d5463b8b701b9c7638a07f6;hpb=71d35259ff67427dbebde9badd0d0efed71c134a;p=dpdk.git diff --git a/lib/librte_pmd_i40e/i40e_ethdev.c b/lib/librte_pmd_i40e/i40e_ethdev.c index 4ebb981654..bb3223b757 100644 --- a/lib/librte_pmd_i40e/i40e_ethdev.c +++ b/lib/librte_pmd_i40e/i40e_ethdev.c @@ -73,7 +73,7 @@ /* Maximun number of VSI */ #define I40E_MAX_NUM_VSIS (384UL) -/* Default queue interrupt throttling time in microseconds*/ +/* Default queue interrupt throttling time in microseconds */ #define I40E_ITR_INDEX_DEFAULT 0 #define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */ #define I40E_QUEUE_ITR_INTERVAL_MAX 8160 /* 8160 us */ @@ -93,6 +93,19 @@ I40E_PFINT_ICR0_ENA_VFLR_MASK | \ I40E_PFINT_ICR0_ENA_ADMINQ_MASK) +#define I40E_FLOW_TYPES ( \ + (1UL << RTE_ETH_FLOW_FRAG_IPV4) | \ + (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \ + (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \ + (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \ + (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \ + (1UL << RTE_ETH_FLOW_FRAG_IPV6) | \ + (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \ + (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \ + (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \ + (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \ + (1UL << RTE_ETH_FLOW_L2_PAYLOAD)) + static int eth_i40e_dev_init(\ __attribute__((unused)) struct eth_driver *eth_drv, struct rte_eth_dev *eth_dev); @@ -188,13 +201,18 @@ static int i40e_dev_udp_tunnel_add(struct rte_eth_dev *dev, struct rte_eth_udp_tunnel *udp_tunnel); static int i40e_dev_udp_tunnel_del(struct rte_eth_dev *dev, struct rte_eth_udp_tunnel *udp_tunnel); +static int i40e_ethertype_filter_set(struct i40e_pf *pf, + struct rte_eth_ethertype_filter *filter, + bool add); +static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg); static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type, enum rte_filter_op filter_op, void *arg); - -/* Default hash key buffer for RSS */ -static uint32_t rss_key_default[I40E_PFQF_HKEY_MAX_INDEX + 1]; +static void i40e_configure_registers(struct i40e_hw *hw); +static void i40e_hw_init(struct i40e_hw *hw); static struct rte_pci_id pci_id_i40e_map[] = { #define RTE_PCI_DEV_ID_DECL_I40E(vend, dev) {RTE_PCI_DEVICE(vend, dev)}, @@ -316,6 +334,35 @@ static struct rte_driver rte_i40e_driver = { PMD_REGISTER_DRIVER(rte_i40e_driver); +/* + * Initialize registers for flexible payload, which should be set by NVM. + * This should be removed from code once it is fixed in NVM. 
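+ * Until then, i40e_flex_payload_reg_init() below programs these
+ * registers during PF initialization.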
+ */
+#ifndef I40E_GLQF_ORT
+#define I40E_GLQF_ORT(_i) (0x00268900 + ((_i) * 4))
+#endif
+#ifndef I40E_GLQF_PIT
+#define I40E_GLQF_PIT(_i) (0x00268C80 + ((_i) * 4))
+#endif

+static inline void i40e_flex_payload_reg_init(struct i40e_hw *hw)
+{
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(18), 0x00000030);
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(19), 0x00000030);
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(26), 0x0000002B);
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(30), 0x0000002B);
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(33), 0x000000E0);
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(34), 0x000000E3);
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(35), 0x000000E6);
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(20), 0x00000031);
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(23), 0x00000031);
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(63), 0x0000002D);
+
+ /* GLQF_PIT Registers */
+ I40E_WRITE_REG(hw, I40E_GLQF_PIT(16), 0x00007480);
+ I40E_WRITE_REG(hw, I40E_GLQF_PIT(17), 0x00007440);
+}
+
 static int
 eth_i40e_dev_init(__rte_unused struct eth_driver *eth_drv,
 struct rte_eth_dev *dev)
@@ -365,6 +412,9 @@ eth_i40e_dev_init(__rte_unused struct eth_driver *eth_drv,
 /* Make sure all is clean before doing PF reset */
 i40e_clear_hw(hw);
 
+ /* Initialize the hardware */
+ i40e_hw_init(hw);
+
 /* Reset here to make sure all is clean for each PF */
 ret = i40e_pf_reset(hw);
 if (ret) {
@@ -379,6 +429,13 @@ eth_i40e_dev_init(__rte_unused struct eth_driver *eth_drv,
 return ret;
 }
 
+ /*
+ * To work around the NVM issue, initialize registers
+ * for flexible payload by software.
+ * It should be removed once the issue is fixed in NVM.
+ */
+ i40e_flex_payload_reg_init(hw);
+
 /* Initialize the parameters for adminq */
 i40e_init_adminq_parameter(hw);
 ret = i40e_init_adminq(hw);
@@ -401,6 +458,16 @@ eth_i40e_dev_init(__rte_unused struct eth_driver *eth_drv,
 /* Clear PXE mode */
 i40e_clear_pxe_mode(hw);
 
+ /*
+ * On X710, performance number is far from the expectation on recent
+ * firmware versions. The fix for this issue may not be integrated in
+ * the following firmware version. So the workaround in software driver
+ * is needed. It needs to modify the initial values of 3 internal only
+ * registers. Note that the workaround can be removed when it is fixed
+ * in firmware in the future.
+ */
+ i40e_configure_registers(hw);
+
 /* Get hw capabilities */
 ret = i40e_get_cap(hw);
 if (ret != I40E_SUCCESS) {
@@ -504,7 +571,6 @@ eth_i40e_dev_init(__rte_unused struct eth_driver *eth_drv,
 err_mac_alloc:
 i40e_vsi_release(pf->main_vsi);
 err_setup_pf_switch:
- i40e_fdir_teardown(pf);
 err_get_mac_addr:
 err_configure_lan_hmc:
 (void)i40e_shutdown_lan_hmc(hw);
@@ -523,8 +589,27 @@ err_get_capabilities:
 static int
 i40e_dev_configure(struct rte_eth_dev *dev)
 {
- int ret;
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
+ int ret;
+
+ if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
+ ret = i40e_fdir_setup(pf);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to setup flow director.");
+ return -ENOTSUP;
+ }
+ ret = i40e_fdir_configure(dev);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to configure fdir.");
+ goto err;
+ }
+ } else
+ i40e_fdir_teardown(pf);
+
+ ret = i40e_dev_init_vlan(dev);
+ if (ret < 0)
+ goto err;
 
 /* VMDQ setup.
* Needs to move VMDQ setting out of i40e_pf_config_mq_rx() as VMDQ and @@ -541,10 +626,12 @@ i40e_dev_configure(struct rte_eth_dev *dev) if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) { ret = i40e_vmdq_setup(dev); if (ret) - return ret; + goto err; } - - return i40e_dev_init_vlan(dev); + return 0; +err: + i40e_fdir_teardown(pf); + return ret; } void @@ -795,14 +882,8 @@ i40e_dev_start(struct rte_eth_dev *dev) i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi); } - ret = i40e_fdir_configure(dev); - if (ret < 0) { - PMD_DRV_LOG(ERR, "failed to configure fdir."); - goto err_up; - } - /* enable FDIR MSIX interrupt */ - if (pf->flags & I40E_FLAG_FDIR) { + if (pf->fdir.fdir_vsi) { i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi); i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi); } @@ -861,7 +942,7 @@ i40e_dev_stop(struct rte_eth_dev *dev) i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi); } - if (pf->flags & I40E_FLAG_FDIR) { + if (pf->fdir.fdir_vsi) { i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi); i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi); } @@ -1307,6 +1388,9 @@ i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) I40E_GLPRT_PTC9522L(hw->port), pf->offset_loaded, &os->tx_size_big, &ns->tx_size_big); + i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index), + pf->offset_loaded, + &os->fd_sb_match, &ns->fd_sb_match); /* GLPRT_MSPDC not supported */ /* GLPRT_XEC not supported */ @@ -1323,6 +1407,7 @@ i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) stats->obytes = ns->eth.tx_bytes; stats->oerrors = ns->eth.tx_errors; stats->imcasts = ns->eth.rx_multicast; + stats->fdirmatch = ns->fd_sb_match; /* Rx Errors */ stats->ibadcrc = ns->crc_errors; @@ -1398,6 +1483,7 @@ i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) ns->mac_short_packet_dropped); PMD_DRV_LOG(DEBUG, "checksum_error: %lu", ns->checksum_error); + PMD_DRV_LOG(DEBUG, "fdir_match: %lu", ns->fd_sb_match); PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************"); } @@ -1444,8 +1530,10 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM | - DEV_TX_OFFLOAD_SCTP_CKSUM; + DEV_TX_OFFLOAD_SCTP_CKSUM | + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; dev_info->reta_size = pf->hash_lut_size; + dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL; dev_info->default_rxconf = (struct rte_eth_rxconf) { .rx_thresh = { @@ -1863,7 +1951,7 @@ i40e_dev_rss_reta_query(struct rte_eth_dev *dev, lut = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2)); for (j = 0; j < I40E_4_BIT_WIDTH; j++) { if (mask & (0x1 << j)) - reta_conf[idx].reta[shift] = ((lut >> + reta_conf[idx].reta[shift + j] = ((lut >> (CHAR_BIT * j)) & I40E_8_BIT_MASK); } } @@ -3255,15 +3343,6 @@ i40e_pf_setup(struct i40e_pf *pf) } pf->main_vsi = vsi; - /* setup FDIR after main vsi created.*/ - if (pf->flags & I40E_FLAG_FDIR) { - ret = i40e_fdir_setup(pf); - if (ret != I40E_SUCCESS) { - PMD_DRV_LOG(ERR, "Failed to setup flow director."); - pf->flags &= ~I40E_FLAG_FDIR; - } - } - /* Configure filter control */ memset(&settings, 0, sizeof(settings)); if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128) @@ -4116,8 +4195,11 @@ i40e_find_vlan_filter(struct i40e_vsi *vsi, { uint32_t vid_idx, vid_bit; - vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F); - vid_bit = (uint32_t) (1 << (vlan_id & 0x1F)); + if (vlan_id > ETH_VLAN_ID_MAX) + return 0; + + vid_idx = I40E_VFTA_IDX(vlan_id); + vid_bit = I40E_VFTA_BIT(vlan_id); if (vsi->vfta[vid_idx] & 
vid_bit) return 1; @@ -4131,14 +4213,11 @@ i40e_set_vlan_filter(struct i40e_vsi *vsi, { uint32_t vid_idx, vid_bit; -#define UINT32_BIT_MASK 0x1F -#define VALID_VLAN_BIT_MASK 0xFFF - /* VFTA is 32-bits size array, each element contains 32 vlan bits, Find the - * element first, then find the bits it belongs to - */ - vid_idx = (uint32_t) ((vlan_id & VALID_VLAN_BIT_MASK) >> - sizeof(uint32_t)); - vid_bit = (uint32_t) (1 << (vlan_id & UINT32_BIT_MASK)); + if (vlan_id > ETH_VLAN_ID_MAX) + return; + + vid_idx = I40E_VFTA_IDX(vlan_id); + vid_bit = I40E_VFTA_BIT(vlan_id); if (on) vsi->vfta[vid_idx] |= vid_bit; @@ -4511,26 +4590,26 @@ i40e_config_hena(uint64_t flags) if (!flags) return hena; - if (flags & ETH_RSS_NONF_IPV4_UDP) - hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP; - if (flags & ETH_RSS_NONF_IPV4_TCP) + if (flags & ETH_RSS_FRAG_IPV4) + hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4; + if (flags & ETH_RSS_NONFRAG_IPV4_TCP) hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP; - if (flags & ETH_RSS_NONF_IPV4_SCTP) + if (flags & ETH_RSS_NONFRAG_IPV4_UDP) + hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP; + if (flags & ETH_RSS_NONFRAG_IPV4_SCTP) hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP; - if (flags & ETH_RSS_NONF_IPV4_OTHER) + if (flags & ETH_RSS_NONFRAG_IPV4_OTHER) hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER; - if (flags & ETH_RSS_FRAG_IPV4) - hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4; - if (flags & ETH_RSS_NONF_IPV6_UDP) - hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP; - if (flags & ETH_RSS_NONF_IPV6_TCP) + if (flags & ETH_RSS_FRAG_IPV6) + hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6; + if (flags & ETH_RSS_NONFRAG_IPV6_TCP) hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP; - if (flags & ETH_RSS_NONF_IPV6_SCTP) + if (flags & ETH_RSS_NONFRAG_IPV6_UDP) + hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP; + if (flags & ETH_RSS_NONFRAG_IPV6_SCTP) hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP; - if (flags & ETH_RSS_NONF_IPV6_OTHER) + if (flags & ETH_RSS_NONFRAG_IPV6_OTHER) hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER; - if (flags & ETH_RSS_FRAG_IPV6) - hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6; if (flags & ETH_RSS_L2_PAYLOAD) hena |= 1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD; @@ -4545,27 +4624,26 @@ i40e_parse_hena(uint64_t flags) if (!flags) return rss_hf; - - if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP)) - rss_hf |= ETH_RSS_NONF_IPV4_UDP; + if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4)) + rss_hf |= ETH_RSS_FRAG_IPV4; if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP)) - rss_hf |= ETH_RSS_NONF_IPV4_TCP; + rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP; + if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP)) + rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP; if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP)) - rss_hf |= ETH_RSS_NONF_IPV4_SCTP; + rss_hf |= ETH_RSS_NONFRAG_IPV4_SCTP; if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER)) - rss_hf |= ETH_RSS_NONF_IPV4_OTHER; - if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4)) - rss_hf |= ETH_RSS_FRAG_IPV4; - if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP)) - rss_hf |= ETH_RSS_NONF_IPV6_UDP; + rss_hf |= ETH_RSS_NONFRAG_IPV4_OTHER; + if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6)) + rss_hf |= ETH_RSS_FRAG_IPV6; if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP)) - rss_hf |= ETH_RSS_NONF_IPV6_TCP; + rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP; + if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP)) + rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP; if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP)) - rss_hf |= 
ETH_RSS_NONF_IPV6_SCTP; + rss_hf |= ETH_RSS_NONFRAG_IPV6_SCTP; if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER)) - rss_hf |= ETH_RSS_NONF_IPV6_OTHER; - if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6)) - rss_hf |= ETH_RSS_FRAG_IPV6; + rss_hf |= ETH_RSS_NONFRAG_IPV6_OTHER; if (flags & (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD)) rss_hf |= ETH_RSS_L2_PAYLOAD; @@ -4733,6 +4811,9 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf, case RTE_TUNNEL_TYPE_VXLAN: tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN; break; + case RTE_TUNNEL_TYPE_NVGRE: + tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC; + break; default: /* Other tunnel types is not supported. */ PMD_DRV_LOG(ERR, "tunnel type is not supported."); @@ -4976,9 +5057,12 @@ i40e_pf_config_rss(struct i40e_pf *pf) } if (rss_conf.rss_key == NULL || rss_conf.rss_key_len < (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) { - /* Calculate the default hash key */ - for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) - rss_key_default[i] = (uint32_t)rte_rand(); + /* Random default keys */ + static uint32_t rss_key_default[] = {0x6b793944, + 0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8, + 0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605, + 0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581}; + rss_conf.rss_key = (uint8_t *)rss_key_default; rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t); @@ -5073,6 +5157,351 @@ i40e_pf_config_mq_rx(struct i40e_pf *pf) return ret; } +/* Get the symmetric hash enable configurations per port */ +static void +i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable) +{ + uint32_t reg = I40E_READ_REG(hw, I40E_PRTQF_CTL_0); + + *enable = reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK ? 1 : 0; +} + +/* Set the symmetric hash enable configurations per port */ +static void +i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable) +{ + uint32_t reg = I40E_READ_REG(hw, I40E_PRTQF_CTL_0); + + if (enable > 0) { + if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) { + PMD_DRV_LOG(INFO, "Symmetric hash has already " + "been enabled"); + return; + } + reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK; + } else { + if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) { + PMD_DRV_LOG(INFO, "Symmetric hash has already " + "been disabled"); + return; + } + reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK; + } + I40E_WRITE_REG(hw, I40E_PRTQF_CTL_0, reg); + I40E_WRITE_FLUSH(hw); +} + +/* + * Get global configurations of hash function type and symmetric hash enable + * per flow type (pctype). Note that global configuration means it affects all + * the ports on the same NIC. + */ +static int +i40e_get_hash_filter_global_config(struct i40e_hw *hw, + struct rte_eth_hash_global_conf *g_cfg) +{ + uint32_t reg, mask = I40E_FLOW_TYPES; + uint16_t i; + enum i40e_filter_pctype pctype; + + memset(g_cfg, 0, sizeof(*g_cfg)); + reg = I40E_READ_REG(hw, I40E_GLQF_CTL); + if (reg & I40E_GLQF_CTL_HTOEP_MASK) + g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ; + else + g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR; + PMD_DRV_LOG(DEBUG, "Hash function is %s", + (reg & I40E_GLQF_CTL_HTOEP_MASK) ? 
"Toeplitz" : "Simple XOR"); + + for (i = 0; mask && i < RTE_ETH_FLOW_MAX; i++) { + if (!(mask & (1UL << i))) + continue; + mask &= ~(1UL << i); + /* Bit set indicats the coresponding flow type is supported */ + g_cfg->valid_bit_mask[0] |= (1UL << i); + pctype = i40e_flowtype_to_pctype(i); + reg = I40E_READ_REG(hw, I40E_GLQF_HSYM(pctype)); + if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK) + g_cfg->sym_hash_enable_mask[0] |= (1UL << i); + } + + return 0; +} + +static int +i40e_hash_global_config_check(struct rte_eth_hash_global_conf *g_cfg) +{ + uint32_t i; + uint32_t mask0, i40e_mask = I40E_FLOW_TYPES; + + if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ && + g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR && + g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_DEFAULT) { + PMD_DRV_LOG(ERR, "Unsupported hash function type %d", + g_cfg->hash_func); + return -EINVAL; + } + + /* + * As i40e supports less than 32 flow types, only first 32 bits need to + * be checked. + */ + mask0 = g_cfg->valid_bit_mask[0]; + for (i = 0; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) { + if (i == 0) { + /* Check if any unsupported flow type configured */ + if ((mask0 | i40e_mask) ^ i40e_mask) + goto mask_err; + } else { + if (g_cfg->valid_bit_mask[i]) + goto mask_err; + } + } + + return 0; + +mask_err: + PMD_DRV_LOG(ERR, "i40e unsupported flow type bit(s) configured"); + + return -EINVAL; +} + +/* + * Set global configurations of hash function type and symmetric hash enable + * per flow type (pctype). Note any modifying global configuration will affect + * all the ports on the same NIC. + */ +static int +i40e_set_hash_filter_global_config(struct i40e_hw *hw, + struct rte_eth_hash_global_conf *g_cfg) +{ + int ret; + uint16_t i; + uint32_t reg; + uint32_t mask0 = g_cfg->valid_bit_mask[0]; + enum i40e_filter_pctype pctype; + + /* Check the input parameters */ + ret = i40e_hash_global_config_check(g_cfg); + if (ret < 0) + return ret; + + for (i = 0; mask0 && i < UINT32_BIT; i++) { + if (!(mask0 & (1UL << i))) + continue; + mask0 &= ~(1UL << i); + pctype = i40e_flowtype_to_pctype(i); + reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ? 
+ for (i = 0; mask0 && i < UINT32_BIT; i++) {
+ if (!(mask0 & (1UL << i)))
+ continue;
+ mask0 &= ~(1UL << i);
+ pctype = i40e_flowtype_to_pctype(i);
+ reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ?
+ I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
+ I40E_WRITE_REG(hw, I40E_GLQF_HSYM(pctype), reg);
+ }
+
+ reg = I40E_READ_REG(hw, I40E_GLQF_CTL);
+ if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
+ /* Toeplitz */
+ if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
+ PMD_DRV_LOG(DEBUG, "Hash function already set to "
+ "Toeplitz");
+ goto out;
+ }
+ reg |= I40E_GLQF_CTL_HTOEP_MASK;
+ } else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
+ /* Simple XOR */
+ if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
+ PMD_DRV_LOG(DEBUG, "Hash function already set to "
+ "Simple XOR");
+ goto out;
+ }
+ reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
+ } else
+ /* Use the default, and keep it as it is */
+ goto out;
+
+ I40E_WRITE_REG(hw, I40E_GLQF_CTL, reg);
+
+out:
+ I40E_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+static int
+i40e_hash_filter_get(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
+{
+ int ret = 0;
+
+ if (!hw || !info) {
+ PMD_DRV_LOG(ERR, "Invalid pointer");
+ return -EFAULT;
+ }
+
+ switch (info->info_type) {
+ case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
+ i40e_get_symmetric_hash_enable_per_port(hw,
+ &(info->info.enable));
+ break;
+ case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
+ ret = i40e_get_hash_filter_global_config(hw,
+ &(info->info.global_conf));
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
+ info->info_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+i40e_hash_filter_set(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
+{
+ int ret = 0;
+
+ if (!hw || !info) {
+ PMD_DRV_LOG(ERR, "Invalid pointer");
+ return -EFAULT;
+ }
+
+ switch (info->info_type) {
+ case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
+ i40e_set_symmetric_hash_enable_per_port(hw, info->info.enable);
+ break;
+ case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
+ ret = i40e_set_hash_filter_global_config(hw,
+ &(info->info.global_conf));
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
+ info->info_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/* Operations for hash function */
+static int
+i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret = 0;
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_NOP:
+ break;
+ case RTE_ETH_FILTER_GET:
+ ret = i40e_hash_filter_get(hw,
+ (struct rte_eth_hash_filter_info *)arg);
+ break;
+ case RTE_ETH_FILTER_SET:
+ ret = i40e_hash_filter_set(hw,
+ (struct rte_eth_hash_filter_info *)arg);
+ break;
+ default:
+ PMD_DRV_LOG(WARNING, "Filter operation (%d) not supported",
+ filter_op);
+ ret = -ENOTSUP;
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Configure an ethertype filter, which can direct packets by filtering
+ * on MAC address and ether_type, or on ether_type only.
+ */
+static int
+i40e_ethertype_filter_set(struct i40e_pf *pf,
+ struct rte_eth_ethertype_filter *filter,
+ bool add)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_control_filter_stats stats;
+ uint16_t flags = 0;
+ int ret;
+
+ if (filter->queue >= pf->dev_data->nb_rx_queues) {
+ PMD_DRV_LOG(ERR, "Invalid queue ID");
+ return -EINVAL;
+ }
+ if (filter->ether_type == ETHER_TYPE_IPv4 ||
+ filter->ether_type == ETHER_TYPE_IPv6) {
+ PMD_DRV_LOG(ERR, "unsupported ether_type (0x%04x) in"
+ " control packet filter.", filter->ether_type);
+ return -EINVAL;
+ }
+ if (filter->ether_type == ETHER_TYPE_VLAN)
+ PMD_DRV_LOG(WARNING, "filtering vlan ether_type in first tag"
+ " is not supported.");
+
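+ /* Translate the generic filter flags into AQ control packet filter flags */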
+ if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
+ if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
+
+ memset(&stats, 0, sizeof(stats));
+ ret = i40e_aq_add_rem_control_packet_filter(hw,
+ filter->mac_addr.addr_bytes,
+ filter->ether_type, flags,
+ pf->main_vsi->seid,
+ filter->queue, add, &stats, NULL);
+
+ PMD_DRV_LOG(INFO, "add/rem control packet filter, return %d,"
+ " mac_etype_used = %u, etype_used = %u,"
+ " mac_etype_free = %u, etype_free = %u",
+ ret, stats.mac_etype_used, stats.etype_used,
+ stats.mac_etype_free, stats.etype_free);
+ if (ret < 0)
+ return -ENOSYS;
+ return 0;
+}
+
+/*
+ * Handle operations for ethertype filter.
+ */
+static int
+i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ int ret = 0;
+
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return ret;
+
+ if (arg == NULL) {
+ PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
+ filter_op);
+ return -EINVAL;
+ }
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ ret = i40e_ethertype_filter_set(pf,
+ (struct rte_eth_ethertype_filter *)arg,
+ TRUE);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = i40e_ethertype_filter_set(pf,
+ (struct rte_eth_ethertype_filter *)arg,
+ FALSE);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
+ ret = -ENOSYS;
+ break;
+ }
+ return ret;
+}
+
 static int
 i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
 enum rte_filter_type filter_type,
@@ -5085,12 +5514,21 @@ i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
 return -EINVAL;
 
 switch (filter_type) {
+ case RTE_ETH_FILTER_HASH:
+ ret = i40e_hash_filter_ctrl(dev, filter_op, arg);
+ break;
 case RTE_ETH_FILTER_MACVLAN:
 ret = i40e_mac_filter_handle(dev, filter_op, arg);
 break;
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ret = i40e_ethertype_filter_handle(dev, filter_op, arg);
+ break;
 case RTE_ETH_FILTER_TUNNEL:
 ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
 break;
+ case RTE_ETH_FILTER_FDIR:
+ ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
+ break;
 default:
 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
 filter_type);
@@ -5100,3 +5538,151 @@ i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
 
 return ret;
 }
+
+/*
+ * As some registers are only reset by a global hardware reset,
+ * hardware initialization is needed to put those registers into an
+ * expected initial state.
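+ * i40e_hw_init() is invoked from eth_i40e_dev_init(), right after
+ * i40e_clear_hw() and before the PF reset.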
+ */ +static void +i40e_hw_init(struct i40e_hw *hw) +{ + /* clear the PF Queue Filter control register */ + I40E_WRITE_REG(hw, I40E_PFQF_CTL_0, 0); + + /* Disable symmetric hash per port */ + i40e_set_symmetric_hash_enable_per_port(hw, 0); +} + +enum i40e_filter_pctype +i40e_flowtype_to_pctype(uint16_t flow_type) +{ + static const enum i40e_filter_pctype pctype_table[] = { + [RTE_ETH_FLOW_FRAG_IPV4] = I40E_FILTER_PCTYPE_FRAG_IPV4, + [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = + I40E_FILTER_PCTYPE_NONF_IPV4_UDP, + [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = + I40E_FILTER_PCTYPE_NONF_IPV4_TCP, + [RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] = + I40E_FILTER_PCTYPE_NONF_IPV4_SCTP, + [RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] = + I40E_FILTER_PCTYPE_NONF_IPV4_OTHER, + [RTE_ETH_FLOW_FRAG_IPV6] = I40E_FILTER_PCTYPE_FRAG_IPV6, + [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = + I40E_FILTER_PCTYPE_NONF_IPV6_UDP, + [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = + I40E_FILTER_PCTYPE_NONF_IPV6_TCP, + [RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] = + I40E_FILTER_PCTYPE_NONF_IPV6_SCTP, + [RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] = + I40E_FILTER_PCTYPE_NONF_IPV6_OTHER, + [RTE_ETH_FLOW_L2_PAYLOAD] = I40E_FILTER_PCTYPE_L2_PAYLOAD, + }; + + return pctype_table[flow_type]; +} + +uint16_t +i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype) +{ + static const uint16_t flowtype_table[] = { + [I40E_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_FLOW_FRAG_IPV4, + [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = + RTE_ETH_FLOW_NONFRAG_IPV4_UDP, + [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = + RTE_ETH_FLOW_NONFRAG_IPV4_TCP, + [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = + RTE_ETH_FLOW_NONFRAG_IPV4_SCTP, + [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] = + RTE_ETH_FLOW_NONFRAG_IPV4_OTHER, + [I40E_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_FLOW_FRAG_IPV6, + [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = + RTE_ETH_FLOW_NONFRAG_IPV6_UDP, + [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = + RTE_ETH_FLOW_NONFRAG_IPV6_TCP, + [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = + RTE_ETH_FLOW_NONFRAG_IPV6_SCTP, + [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] = + RTE_ETH_FLOW_NONFRAG_IPV6_OTHER, + [I40E_FILTER_PCTYPE_L2_PAYLOAD] = RTE_ETH_FLOW_L2_PAYLOAD, + }; + + return flowtype_table[pctype]; +} + +static int +i40e_debug_read_register(struct i40e_hw *hw, uint32_t addr, uint64_t *val) +{ + struct i40e_aq_desc desc; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg); + desc.params.internal.param1 = rte_cpu_to_le_32(addr); + status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); + if (status < 0) + return status; + + *val = ((uint64_t)(rte_le_to_cpu_32(desc.params.internal.param2)) << + (CHAR_BIT * sizeof(uint32_t))) + + rte_le_to_cpu_32(desc.params.internal.param3); + + return status; +} + +/* + * On X710, performance number is far from the expectation on recent firmware + * versions. The fix for this issue may not be integrated in the following + * firmware version. So the workaround in software driver is needed. It needs + * to modify the initial values of 3 internal only registers. Note that the + * workaround can be removed when it is fixed in firmware in the future. 
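+ * The three registers are internal-only and not part of the exported
+ * register map, hence the local defines below.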
+ */ +static void +i40e_configure_registers(struct i40e_hw *hw) +{ +#define I40E_GL_SWR_PRI_JOIN_MAP_0 0x26CE00 +#define I40E_GL_SWR_PRI_JOIN_MAP_2 0x26CE08 +#define I40E_GL_SWR_PM_UP_THR 0x269FBC +#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x10000200 +#define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200 +#define I40E_GL_SWR_PM_UP_THR_VALUE 0x03030303 + + static const struct { + uint32_t addr; + uint64_t val; + } reg_table[] = { + {I40E_GL_SWR_PRI_JOIN_MAP_0, I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE}, + {I40E_GL_SWR_PRI_JOIN_MAP_2, I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE}, + {I40E_GL_SWR_PM_UP_THR, I40E_GL_SWR_PM_UP_THR_VALUE}, + }; + uint64_t reg; + uint32_t i; + int ret; + + /* Below fix is for X710 only */ + if (i40e_is_40G_device(hw->device_id)) + return; + + for (i = 0; i < RTE_DIM(reg_table); i++) { + ret = i40e_debug_read_register(hw, reg_table[i].addr, ®); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32, + reg_table[i].addr); + break; + } + PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64, + reg_table[i].addr, reg); + if (reg == reg_table[i].val) + continue; + + ret = i40e_aq_debug_write_register(hw, reg_table[i].addr, + reg_table[i].val, NULL); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to write 0x%"PRIx64" to the " + "address of 0x%"PRIx32, reg_table[i].val, + reg_table[i].addr); + break; + } + PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of " + "0x%"PRIx32, reg_table[i].val, reg_table[i].addr); + } +}
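
---

For reference, below is a minimal usage sketch of the two filter paths this patch wires into i40e_dev_filter_ctrl(): per-port symmetric hashing via RTE_ETH_FILTER_HASH, and an ethertype filter via RTE_ETH_FILTER_ETHERTYPE. It is illustrative only and not part of the patch: the port id, queue number, and function name are placeholder assumptions, error handling is trimmed, and it assumes a started i40e port running a DPDK build that includes this change.

#include <string.h>
#include <rte_ethdev.h>
#include <rte_eth_ctrl.h>
#include <rte_ether.h>

/* Placeholder values -- adjust for the actual setup. */
#define PORT_ID   0
#define ARP_QUEUE 1

static int
setup_filters_example(void)
{
	struct rte_eth_hash_filter_info hash_info;
	struct rte_eth_ethertype_filter etype_filter;
	int ret;

	/* Enable symmetric hashing on this port (per-port setting). */
	memset(&hash_info, 0, sizeof(hash_info));
	hash_info.info_type = RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT;
	hash_info.info.enable = 1;
	ret = rte_eth_dev_filter_ctrl(PORT_ID, RTE_ETH_FILTER_HASH,
				      RTE_ETH_FILTER_SET, &hash_info);
	if (ret < 0)
		return ret;

	/*
	 * Steer ARP frames to a dedicated queue, matching on ether type
	 * only (RTE_ETHTYPE_FLAGS_MAC not set, so the MAC is ignored).
	 */
	memset(&etype_filter, 0, sizeof(etype_filter));
	etype_filter.ether_type = ETHER_TYPE_ARP;
	etype_filter.queue = ARP_QUEUE;
	ret = rte_eth_dev_filter_ctrl(PORT_ID, RTE_ETH_FILTER_ETHERTYPE,
				      RTE_ETH_FILTER_ADD, &etype_filter);
	return ret;
}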