X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fi40e%2Fi40e_ethdev.c;h=c68fba125ea79ab6a4dddc2fd16dc852ac858539;hb=b342fd9084cf99292f0077e16b0edb3b6b79f6e7;hp=5999c964bdb9a33f0dcd8c756470c79fe1a31951;hpb=73fb89dd6a0010eb1435a83a173187688a61a242;p=dpdk.git diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c index 5999c964bd..c68fba125e 100644 --- a/drivers/net/i40e/i40e_ethdev.c +++ b/drivers/net/i40e/i40e_ethdev.c @@ -443,6 +443,9 @@ static const struct rte_pci_id pci_id_i40e_map[] = { { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X710_N3000) }, { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_XXV710_N3000) }, { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_BC) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_5G_BASE_T_BC) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_B) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_SFP) }, { .vendor_id = 0, /* sentinel */ }, }; @@ -522,6 +525,7 @@ static const struct eth_dev_ops i40e_eth_dev_ops = { .mac_addr_set = i40e_set_default_mac_addr, .mtu_set = i40e_dev_mtu_set, .tm_ops_get = i40e_tm_ops_get, + .tx_done_cleanup = i40e_tx_done_cleanup, }; /* store statistics names and its offset in stats structure */ @@ -1106,6 +1110,7 @@ i40e_init_customized_info(struct i40e_pf *pf) } pf->gtp_support = false; + pf->esp_support = false; } void @@ -1647,11 +1652,14 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) /* Set the max frame size to 0x2600 by default, * in case other drivers changed the default value. */ - i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, 0, NULL); + i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, false, 0, NULL); /* initialize mirror rule list */ TAILQ_INIT(&pf->mirror_list); + /* initialize RSS rule list */ + TAILQ_INIT(&pf->rss_config_list); + /* initialize Traffic Manager configuration */ i40e_tm_conf_init(dev); @@ -1671,7 +1679,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) /* initialize queue region configuration */ i40e_init_queue_region_conf(dev); - /* initialize rss configuration from rte_flow */ + /* initialize RSS configuration from rte_flow */ memset(&pf->rss_info, 0, sizeof(struct i40e_rte_flow_rss_conf)); @@ -2241,6 +2249,9 @@ i40e_apply_link_speed(struct rte_eth_dev *dev) struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct rte_eth_conf *conf = &dev->data->dev_conf; + abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK | + I40E_AQ_PHY_LINK_ENABLED; + if (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) { conf->link_speeds = ETH_LINK_SPEED_40G | ETH_LINK_SPEED_25G | @@ -2248,11 +2259,12 @@ i40e_apply_link_speed(struct rte_eth_dev *dev) ETH_LINK_SPEED_10G | ETH_LINK_SPEED_1G | ETH_LINK_SPEED_100M; + + abilities |= I40E_AQ_PHY_AN_ENABLED; + } else { + abilities &= ~I40E_AQ_PHY_AN_ENABLED; } speed = i40e_parse_link_speeds(conf->link_speeds); - abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK | - I40E_AQ_PHY_AN_ENABLED | - I40E_AQ_PHY_LINK_ENABLED; return i40e_phy_conf_link(hw, abilities, speed, true); } @@ -2271,13 +2283,6 @@ i40e_dev_start(struct rte_eth_dev *dev) hw->adapter_stopped = 0; - if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) { - PMD_INIT_LOG(ERR, - "Invalid link_speeds for port %u, autonegotiation disabled", - dev->data->port_id); - return -EINVAL; - } - rte_intr_disable(intr_handle); if ((rte_intr_cap_multiple(intr_handle) || @@ -2594,7 +2599,7 @@ i40e_dev_close(struct rte_eth_dev *dev) do { ret = 
rte_intr_callback_unregister(intr_handle, i40e_dev_interrupt_handler, dev); - if (ret >= 0) { + if (ret >= 0 || ret == -ENOENT) { break; } else if (ret != -EAGAIN) { PMD_INIT_LOG(ERR, @@ -4488,7 +4493,7 @@ out: * @alignment: what to align the allocation to **/ enum i40e_status_code -i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw, +i40e_allocate_dma_mem_d(__rte_unused struct i40e_hw *hw, struct i40e_dma_mem *mem, u64 size, u32 alignment) @@ -4522,7 +4527,7 @@ i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw, * @mem: ptr to mem struct to free **/ enum i40e_status_code -i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw, +i40e_free_dma_mem_d(__rte_unused struct i40e_hw *hw, struct i40e_dma_mem *mem) { if (!mem) @@ -4546,7 +4551,7 @@ i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw, * @size: size of memory requested **/ enum i40e_status_code -i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw, +i40e_allocate_virt_mem_d(__rte_unused struct i40e_hw *hw, struct i40e_virt_mem *mem, u32 size) { @@ -4568,7 +4573,7 @@ i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw, * @mem: pointer to mem struct to free **/ enum i40e_status_code -i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw, +i40e_free_virt_mem_d(__rte_unused struct i40e_hw *hw, struct i40e_virt_mem *mem) { if (!mem) @@ -4599,7 +4604,7 @@ i40e_release_spinlock_d(struct i40e_spinlock *sp) } void -i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp) +i40e_destroy_spinlock_d(__rte_unused struct i40e_spinlock *sp) { return; } @@ -4930,6 +4935,7 @@ i40e_res_pool_free(struct i40e_res_pool_info *pool, { struct pool_entry *entry, *next, *prev, *valid_entry = NULL; uint32_t pool_offset; + uint16_t len; int insert; if (pool == NULL) { @@ -4968,12 +4974,13 @@ i40e_res_pool_free(struct i40e_res_pool_info *pool, } insert = 0; + len = valid_entry->len; /* Try to merge with next one*/ if (next != NULL) { /* Merge with next one */ - if (valid_entry->base + valid_entry->len == next->base) { + if (valid_entry->base + len == next->base) { next->base = valid_entry->base; - next->len += valid_entry->len; + next->len += len; rte_free(valid_entry); valid_entry = next; insert = 1; @@ -4983,13 +4990,15 @@ i40e_res_pool_free(struct i40e_res_pool_info *pool, if (prev != NULL) { /* Merge with previous one */ if (prev->base + prev->len == valid_entry->base) { - prev->len += valid_entry->len; + prev->len += len; /* If it merge with next one, remove next node */ if (insert == 1) { LIST_REMOVE(valid_entry, next); rte_free(valid_entry); + valid_entry = NULL; } else { rte_free(valid_entry); + valid_entry = NULL; insert = 1; } } @@ -5005,8 +5014,8 @@ i40e_res_pool_free(struct i40e_res_pool_info *pool, LIST_INSERT_HEAD(&pool->free_list, valid_entry, next); } - pool->num_free += valid_entry->len; - pool->num_alloc -= valid_entry->len; + pool->num_free += len; + pool->num_alloc -= len; return 0; } @@ -6760,6 +6769,92 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev) rte_free(info.msg_buf); } +static void +i40e_handle_mdd_event(struct rte_eth_dev *dev) +{ +#define I40E_MDD_CLEAR32 0xFFFFFFFF +#define I40E_MDD_CLEAR16 0xFFFF + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + bool mdd_detected = false; + struct i40e_pf_vf *vf; + uint32_t reg; + int i; + + /* find what triggered the MDD event */ + reg = I40E_READ_REG(hw, I40E_GL_MDET_TX); + if (reg & 
I40E_GL_MDET_TX_VALID_MASK) {
+		uint8_t pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
+				I40E_GL_MDET_TX_PF_NUM_SHIFT;
+		uint16_t vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
+				I40E_GL_MDET_TX_VF_NUM_SHIFT;
+		uint8_t event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
+				I40E_GL_MDET_TX_EVENT_SHIFT;
+		uint16_t queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
+				I40E_GL_MDET_TX_QUEUE_SHIFT) -
+				hw->func_caps.base_queue;
+		PMD_DRV_LOG(WARNING, "Malicious Driver Detection event 0x%02x on TX "
+			"queue %d PF number 0x%02x VF number 0x%02x device %s\n",
+			event, queue, pf_num, vf_num, dev->data->name);
+		I40E_WRITE_REG(hw, I40E_GL_MDET_TX, I40E_MDD_CLEAR32);
+		mdd_detected = true;
+	}
+	reg = I40E_READ_REG(hw, I40E_GL_MDET_RX);
+	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
+		uint8_t func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
+				I40E_GL_MDET_RX_FUNCTION_SHIFT;
+		uint8_t event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
+				I40E_GL_MDET_RX_EVENT_SHIFT;
+		uint16_t queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
+				I40E_GL_MDET_RX_QUEUE_SHIFT) -
+				hw->func_caps.base_queue;
+
+		PMD_DRV_LOG(WARNING, "Malicious Driver Detection event 0x%02x on RX "
+			"queue %d of function 0x%02x device %s\n",
+			event, queue, func, dev->data->name);
+		I40E_WRITE_REG(hw, I40E_GL_MDET_RX, I40E_MDD_CLEAR32);
+		mdd_detected = true;
+	}
+
+	if (mdd_detected) {
+		reg = I40E_READ_REG(hw, I40E_PF_MDET_TX);
+		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
+			I40E_WRITE_REG(hw, I40E_PF_MDET_TX, I40E_MDD_CLEAR16);
+			PMD_DRV_LOG(WARNING, "TX driver issue detected on PF\n");
+		}
+		reg = I40E_READ_REG(hw, I40E_PF_MDET_RX);
+		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
+			I40E_WRITE_REG(hw, I40E_PF_MDET_RX,
+					I40E_MDD_CLEAR16);
+			PMD_DRV_LOG(WARNING, "RX driver issue detected on PF\n");
+		}
+	}
+
+	/* see if one of the VFs needs its hand slapped */
+	for (i = 0; i < pf->vf_num && mdd_detected; i++) {
+		vf = &pf->vfs[i];
+		reg = I40E_READ_REG(hw, I40E_VP_MDET_TX(i));
+		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
+			I40E_WRITE_REG(hw, I40E_VP_MDET_TX(i),
+					I40E_MDD_CLEAR16);
+			vf->num_mdd_events++;
+			PMD_DRV_LOG(WARNING, "TX driver issue detected on VF %d %"
+					PRIu64 " times\n",
+					i, vf->num_mdd_events);
+		}
+
+		reg = I40E_READ_REG(hw, I40E_VP_MDET_RX(i));
+		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
+			I40E_WRITE_REG(hw, I40E_VP_MDET_RX(i),
+					I40E_MDD_CLEAR16);
+			vf->num_mdd_events++;
+			PMD_DRV_LOG(WARNING, "RX driver issue detected on VF %d %"
+					PRIu64 " times\n",
+					i, vf->num_mdd_events);
+		}
+	}
+}
+
 /**
  * Interrupt handler triggered by NIC for handling
  * specific interrupt.
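
The i40e_handle_mdd_event() function added above decodes packed bit fields out of the I40E_GL_MDET_TX/RX registers with the usual mask-then-shift idiom before clearing the event. A minimal standalone sketch of that idiom follows; the register layout (masks, shifts, sample value) is hypothetical and merely stands in for the real I40E_GL_MDET_* definitions:

#include <stdint.h>
#include <stdio.h>

/* Extract a bit field from a 32-bit register value given mask and shift. */
static inline uint32_t
reg_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

int
main(void)
{
	/* Hypothetical GL_MDET_TX-style layout: bit 31 = valid,
	 * bits 27:20 = event code, bits 11:0 = absolute queue index.
	 */
	const uint32_t valid_mask = UINT32_C(1) << 31;
	const uint32_t event_mask = UINT32_C(0xFF) << 20;
	const uint32_t queue_mask = UINT32_C(0xFFF);
	uint32_t reg = 0x80300007; /* sample register snapshot */

	if (reg & valid_mask)
		printf("MDD event 0x%02x on queue %u\n",
		       reg_field(reg, event_mask, 20),
		       reg_field(reg, queue_mask, 0));
	return 0;
}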
@@ -6792,8 +6887,10 @@ i40e_dev_interrupt_handler(void *param)
 	}
 	if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
 		PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
-	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
+	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
 		PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
+		i40e_handle_mdd_event(dev);
+	}
 	if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
 		PMD_DRV_LOG(INFO, "ICR0: global reset requested");
 	if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
@@ -6837,8 +6934,10 @@ i40e_dev_alarm_handler(void *param)
 		goto done;
 	if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
 		PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
-	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
+	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
 		PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
+		i40e_handle_mdd_event(dev);
+	}
 	if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
 		PMD_DRV_LOG(INFO, "ICR0: global reset requested");
 	if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
@@ -8679,7 +8778,9 @@ i40e_pf_config_rss(struct i40e_pf *pf)
 			num);
 
 	if (num == 0) {
-		PMD_INIT_LOG(ERR, "No PF queues are configured to enable RSS");
+		PMD_INIT_LOG(ERR,
+			"No PF queues are configured to enable RSS for port %u",
+			pf->dev_data->port_id);
 		return -ENOTSUP;
 	}
 
@@ -9249,6 +9350,7 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
 		I40E_INSET_IPV4_TTL,
 	[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
+		I40E_INSET_DMAC | I40E_INSET_SMAC |
 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
@@ -9264,6 +9366,7 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
 	[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
+		I40E_INSET_DMAC | I40E_INSET_SMAC |
 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
@@ -9280,6 +9383,7 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
 		I40E_INSET_SCTP_VT,
 	[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
+		I40E_INSET_DMAC | I40E_INSET_SMAC |
 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
@@ -10411,6 +10515,7 @@ i40e_get_swr_pm_cfg(struct i40e_hw *hw, uint32_t *value)
 	{ I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_KX_C) },
 	{ I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T) },
 	{ I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T4) },
+	{ I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_X722) },
 
 	{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_KX_B) },
 	{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_A) },
@@ -11576,13 +11681,17 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
 	 * LLDP MIB change event.
 	 */
 	if (sw_dcb == TRUE) {
-		if (i40e_need_stop_lldp(dev)) {
-			ret = i40e_aq_stop_lldp(hw, TRUE, NULL);
-			if (ret != I40E_SUCCESS)
-				PMD_INIT_LOG(DEBUG, "Failed to stop lldp");
-		}
+		/* Stopping LLDP is necessary for DPDK, but stopping it makes
+		 * DCB init fail: the prerequisite for i40e_init_dcb() to
+		 * initialize DCB successfully is that LLDP is enabled. So
+		 * LLDP has to be started before DCB init and stopped again
+		 * after initialization.
+ */ + ret = i40e_aq_start_lldp(hw, true, NULL); + if (ret != I40E_SUCCESS) + PMD_INIT_LOG(DEBUG, "Failed to start lldp"); - ret = i40e_init_dcb(hw); + ret = i40e_init_dcb(hw, true); /* If lldp agent is stopped, the return value from * i40e_init_dcb we expect is failure with I40E_AQ_RC_EPERM * adminq status. Otherwise, it should return success. @@ -11625,12 +11734,18 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb) ret, hw->aq.asq_last_status); return -ENOTSUP; } + + if (i40e_need_stop_lldp(dev)) { + ret = i40e_aq_stop_lldp(hw, true, true, NULL); + if (ret != I40E_SUCCESS) + PMD_INIT_LOG(DEBUG, "Failed to stop lldp"); + } } else { - ret = i40e_aq_start_lldp(hw, NULL); + ret = i40e_aq_start_lldp(hw, true, NULL); if (ret != I40E_SUCCESS) PMD_INIT_LOG(DEBUG, "Failed to start lldp"); - ret = i40e_init_dcb(hw); + ret = i40e_init_dcb(hw, true); if (!ret) { if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) { PMD_INIT_LOG(ERR, @@ -12221,14 +12336,16 @@ i40e_tunnel_filter_restore(struct i40e_pf *pf) } } -/* Restore rss filter */ +/* Restore RSS filter */ static inline void i40e_rss_filter_restore(struct i40e_pf *pf) { - struct i40e_rte_flow_rss_conf *conf = - &pf->rss_info; - if (conf->conf.queue_num) - i40e_config_rss_filter(pf, conf, TRUE); + struct i40e_rss_conf_list *list = &pf->rss_config_list; + struct i40e_rss_filter *filter; + + TAILQ_FOREACH(filter, list, next) { + i40e_config_rss_filter(pf, &filter->rss_filter_info, TRUE); + } } static void @@ -12335,6 +12452,7 @@ i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg, } } name[strlen(name) - 1] = '\0'; + PMD_DRV_LOG(INFO, "name = %s\n", name); if (!strcmp(name, "GTPC")) new_pctype = i40e_find_customized_pctype(pf, @@ -12351,6 +12469,38 @@ i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg, new_pctype = i40e_find_customized_pctype(pf, I40E_CUSTOMIZED_GTPU); + else if (!strcmp(name, "IPV4_L2TPV3")) + new_pctype = + i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_IPV4_L2TPV3); + else if (!strcmp(name, "IPV6_L2TPV3")) + new_pctype = + i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_IPV6_L2TPV3); + else if (!strcmp(name, "IPV4_ESP")) + new_pctype = + i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_ESP_IPV4); + else if (!strcmp(name, "IPV6_ESP")) + new_pctype = + i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_ESP_IPV6); + else if (!strcmp(name, "IPV4_UDP_ESP")) + new_pctype = + i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_ESP_IPV4_UDP); + else if (!strcmp(name, "IPV6_UDP_ESP")) + new_pctype = + i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_ESP_IPV6_UDP); + else if (!strcmp(name, "IPV4_AH")) + new_pctype = + i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_AH_IPV4); + else if (!strcmp(name, "IPV6_AH")) + new_pctype = + i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_AH_IPV6); if (new_pctype) { if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) { new_pctype->pctype = pctype_value; @@ -12446,6 +12596,7 @@ i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg, continue; memset(name, 0, sizeof(name)); strcpy(name, proto[n].name); + PMD_DRV_LOG(INFO, "name = %s\n", name); if (!strncasecmp(name, "PPPOE", 5)) ptype_mapping[i].sw_ptype |= RTE_PTYPE_L2_ETHER_PPPOE; @@ -12539,12 +12690,17 @@ i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg, ptype_mapping[i].sw_ptype |= RTE_PTYPE_TUNNEL_GTPU; in_tunnel = true; + } else if (!strncasecmp(name, "ESP", 3)) { + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_TUNNEL_ESP; + in_tunnel = true; } else if (!strncasecmp(name, "GRENAT", 6)) 
{ ptype_mapping[i].sw_ptype |= RTE_PTYPE_TUNNEL_GRENAT; in_tunnel = true; } else if (!strncasecmp(name, "L2TPV2CTL", 9) || - !strncasecmp(name, "L2TPV2", 6)) { + !strncasecmp(name, "L2TPV2", 6) || + !strncasecmp(name, "L2TPV3", 6)) { ptype_mapping[i].sw_ptype |= RTE_PTYPE_TUNNEL_L2TP; in_tunnel = true; @@ -12558,7 +12714,7 @@ i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg, ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping, ptype_num, 0); if (ret) - PMD_DRV_LOG(ERR, "Failed to update mapping table."); + PMD_DRV_LOG(ERR, "Failed to update ptype mapping table."); rte_free(ptype_mapping); rte_free(ptype); @@ -12623,6 +12779,17 @@ i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg, } } + /* Check if ESP is supported. */ + for (i = 0; i < proto_num; i++) { + if (!strncmp(proto[i].name, "ESP", 3)) { + if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) + pf->esp_support = true; + else + pf->esp_support = false; + break; + } + } + /* Update customized pctype info */ ret = i40e_update_customized_pctype(dev, pkg, pkg_size, proto_num, proto, op); @@ -12788,45 +12955,303 @@ i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out, return 0; } -int -i40e_action_rss_same(const struct rte_flow_action_rss *comp, - const struct rte_flow_action_rss *with) +/* Write HENA register to enable hash */ +static int +i40e_rss_hash_set(struct i40e_pf *pf, struct i40e_rte_flow_rss_conf *rss_conf) { - return (comp->func == with->func && - comp->level == with->level && - comp->types == with->types && - comp->key_len == with->key_len && - comp->queue_num == with->queue_num && - !memcmp(comp->key, with->key, with->key_len) && - !memcmp(comp->queue, with->queue, - sizeof(*with->queue) * with->queue_num)); + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + uint8_t *key = (void *)(uintptr_t)rss_conf->conf.key; + uint64_t hena; + int ret; + + ret = i40e_set_rss_key(pf->main_vsi, key, + rss_conf->conf.key_len); + if (ret) + return ret; + + hena = i40e_config_hena(pf->adapter, rss_conf->conf.types); + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena); + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32)); + I40E_WRITE_FLUSH(hw); + + return 0; } -int -i40e_config_rss_filter(struct i40e_pf *pf, - struct i40e_rte_flow_rss_conf *conf, bool add) +/* Configure hash input set */ +static int +i40e_rss_conf_hash_inset(struct i40e_pf *pf, uint64_t types) { struct i40e_hw *hw = I40E_PF_TO_HW(pf); - uint32_t i, lut = 0; - uint16_t j, num; - struct rte_eth_rss_conf rss_conf = { - .rss_key = conf->conf.key_len ? 
- (void *)(uintptr_t)conf->conf.key : NULL, - .rss_key_len = conf->conf.key_len, - .rss_hf = conf->conf.types, + struct rte_eth_input_set_conf conf; + uint64_t mask0; + int ret = 0; + uint32_t j; + int i; + static const struct { + uint64_t type; + enum rte_eth_input_set_field field; + } inset_match_table[] = { + {ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_SRC_ONLY, + RTE_ETH_INPUT_SET_L3_SRC_IP4}, + {ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_DST_ONLY, + RTE_ETH_INPUT_SET_L3_DST_IP4}, + {ETH_RSS_FRAG_IPV4 | ETH_RSS_L4_SRC_ONLY, + RTE_ETH_INPUT_SET_UNKNOWN}, + {ETH_RSS_FRAG_IPV4 | ETH_RSS_L4_DST_ONLY, + RTE_ETH_INPUT_SET_UNKNOWN}, + + {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_SRC_ONLY, + RTE_ETH_INPUT_SET_L3_SRC_IP4}, + {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_DST_ONLY, + RTE_ETH_INPUT_SET_L3_DST_IP4}, + {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_SRC_ONLY, + RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT}, + {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_DST_ONLY, + RTE_ETH_INPUT_SET_L4_TCP_DST_PORT}, + + {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_SRC_ONLY, + RTE_ETH_INPUT_SET_L3_SRC_IP4}, + {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_DST_ONLY, + RTE_ETH_INPUT_SET_L3_DST_IP4}, + {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_SRC_ONLY, + RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT}, + {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_DST_ONLY, + RTE_ETH_INPUT_SET_L4_UDP_DST_PORT}, + + {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_SRC_ONLY, + RTE_ETH_INPUT_SET_L3_SRC_IP4}, + {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_DST_ONLY, + RTE_ETH_INPUT_SET_L3_DST_IP4}, + {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_SRC_ONLY, + RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT}, + {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_DST_ONLY, + RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT}, + + {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_SRC_ONLY, + RTE_ETH_INPUT_SET_L3_SRC_IP4}, + {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_DST_ONLY, + RTE_ETH_INPUT_SET_L3_DST_IP4}, + {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L4_SRC_ONLY, + RTE_ETH_INPUT_SET_UNKNOWN}, + {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L4_DST_ONLY, + RTE_ETH_INPUT_SET_UNKNOWN}, + + {ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_SRC_ONLY, + RTE_ETH_INPUT_SET_L3_SRC_IP6}, + {ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_DST_ONLY, + RTE_ETH_INPUT_SET_L3_DST_IP6}, + {ETH_RSS_FRAG_IPV6 | ETH_RSS_L4_SRC_ONLY, + RTE_ETH_INPUT_SET_UNKNOWN}, + {ETH_RSS_FRAG_IPV6 | ETH_RSS_L4_DST_ONLY, + RTE_ETH_INPUT_SET_UNKNOWN}, + + {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_SRC_ONLY, + RTE_ETH_INPUT_SET_L3_SRC_IP6}, + {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_DST_ONLY, + RTE_ETH_INPUT_SET_L3_DST_IP6}, + {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_SRC_ONLY, + RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT}, + {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_DST_ONLY, + RTE_ETH_INPUT_SET_L4_TCP_DST_PORT}, + + {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_SRC_ONLY, + RTE_ETH_INPUT_SET_L3_SRC_IP6}, + {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_DST_ONLY, + RTE_ETH_INPUT_SET_L3_DST_IP6}, + {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_SRC_ONLY, + RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT}, + {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_DST_ONLY, + RTE_ETH_INPUT_SET_L4_UDP_DST_PORT}, + + {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_SRC_ONLY, + RTE_ETH_INPUT_SET_L3_SRC_IP6}, + {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_DST_ONLY, + RTE_ETH_INPUT_SET_L3_DST_IP6}, + {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_SRC_ONLY, + RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT}, + {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_DST_ONLY, + RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT}, + + {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_SRC_ONLY, + RTE_ETH_INPUT_SET_L3_SRC_IP6}, + {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_DST_ONLY, + RTE_ETH_INPUT_SET_L3_DST_IP6}, + 
{ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L4_SRC_ONLY, + RTE_ETH_INPUT_SET_UNKNOWN}, + {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L4_DST_ONLY, + RTE_ETH_INPUT_SET_UNKNOWN}, }; - struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info; - if (!add) { - if (i40e_action_rss_same(&rss_info->conf, &conf->conf)) { - i40e_pf_disable_rss(pf); - memset(rss_info, 0, - sizeof(struct i40e_rte_flow_rss_conf)); + mask0 = types & pf->adapter->flow_types_mask; + conf.op = RTE_ETH_INPUT_SET_SELECT; + conf.inset_size = 0; + for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < RTE_ETH_FLOW_MAX; i++) { + if (mask0 & (1ULL << i)) { + conf.flow_type = i; + break; + } + } + + for (j = 0; j < RTE_DIM(inset_match_table); j++) { + if ((types & inset_match_table[j].type) == + inset_match_table[j].type) { + if (inset_match_table[j].field == + RTE_ETH_INPUT_SET_UNKNOWN) + return -EINVAL; + + conf.field[conf.inset_size] = + inset_match_table[j].field; + conf.inset_size++; + } + } + + if (conf.inset_size) { + ret = i40e_hash_filter_inset_select(hw, &conf); + if (ret) + return ret; + } + + return ret; +} + +/* Look up the conflicted rule then mark it as invalid */ +static void +i40e_rss_mark_invalid_rule(struct i40e_pf *pf, + struct i40e_rte_flow_rss_conf *conf) +{ + struct i40e_rss_filter *rss_item; + uint64_t rss_inset; + + /* Clear input set bits before comparing the pctype */ + rss_inset = ~(ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY | + ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY); + + /* Look up the conflicted rule then mark it as invalid */ + TAILQ_FOREACH(rss_item, &pf->rss_config_list, next) { + if (!rss_item->rss_filter_info.valid) + continue; + + if (conf->conf.queue_num && + rss_item->rss_filter_info.conf.queue_num) + rss_item->rss_filter_info.valid = false; + + if (conf->conf.types && + (rss_item->rss_filter_info.conf.types & + rss_inset) == + (conf->conf.types & rss_inset)) + rss_item->rss_filter_info.valid = false; + + if (conf->conf.func == + RTE_ETH_HASH_FUNCTION_SIMPLE_XOR && + rss_item->rss_filter_info.conf.func == + RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) + rss_item->rss_filter_info.valid = false; + } +} + +/* Configure RSS hash function */ +static int +i40e_rss_config_hash_function(struct i40e_pf *pf, + struct i40e_rte_flow_rss_conf *conf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + uint32_t reg, i; + uint64_t mask0; + uint16_t j; + + if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) { + reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL); + if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) { + PMD_DRV_LOG(DEBUG, "Hash function already set to Simple XOR"); + I40E_WRITE_FLUSH(hw); + i40e_rss_mark_invalid_rule(pf, conf); + return 0; } + reg &= ~I40E_GLQF_CTL_HTOEP_MASK; + + i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg); + I40E_WRITE_FLUSH(hw); + i40e_rss_mark_invalid_rule(pf, conf); + } else if (conf->conf.func == + RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) { + mask0 = conf->conf.types & pf->adapter->flow_types_mask; + + i40e_set_symmetric_hash_enable_per_port(hw, 1); + for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) { + if (mask0 & (1UL << i)) + break; + } + + if (i == UINT64_BIT) + return -EINVAL; + + for (j = I40E_FILTER_PCTYPE_INVALID + 1; + j < I40E_FILTER_PCTYPE_MAX; j++) { + if (pf->adapter->pctypes_tbl[i] & (1ULL << j)) + i40e_write_global_rx_ctl(hw, + I40E_GLQF_HSYM(j), + I40E_GLQF_HSYM_SYMH_ENA_MASK); + } + } + + return 0; +} + +/* Enable RSS according to the configuration */ +static int +i40e_rss_enable_hash(struct i40e_pf *pf, + struct i40e_rte_flow_rss_conf *conf) +{ + struct i40e_rte_flow_rss_conf *rss_info = 
&pf->rss_info; + struct i40e_rte_flow_rss_conf rss_conf; + + if (!(conf->conf.types & pf->adapter->flow_types_mask)) + return -ENOTSUP; + + memset(&rss_conf, 0, sizeof(rss_conf)); + rte_memcpy(&rss_conf, conf, sizeof(rss_conf)); + + /* Configure hash input set */ + if (i40e_rss_conf_hash_inset(pf, conf->conf.types)) return -EINVAL; + + if (rss_conf.conf.key == NULL || rss_conf.conf.key_len < + (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) { + /* Random default keys */ + static uint32_t rss_key_default[] = {0x6b793944, + 0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8, + 0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605, + 0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581}; + + rss_conf.conf.key = (uint8_t *)rss_key_default; + rss_conf.conf.key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) * + sizeof(uint32_t); + PMD_DRV_LOG(INFO, + "No valid RSS key config for i40e, using default\n"); } + rss_conf.conf.types |= rss_info->conf.types; + i40e_rss_hash_set(pf, &rss_conf); + + if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) + i40e_rss_config_hash_function(pf, conf); + + i40e_rss_mark_invalid_rule(pf, conf); + + return 0; +} + +/* Configure RSS queue region */ +static int +i40e_rss_config_queue_region(struct i40e_pf *pf, + struct i40e_rte_flow_rss_conf *conf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + uint32_t lut = 0; + uint16_t j, num; + uint32_t i; + /* If both VMDQ and RSS enabled, not all of PF queues are configured. * It's necessary to calculate the actual PF queues that are configured. */ @@ -12840,7 +13265,9 @@ i40e_config_rss_filter(struct i40e_pf *pf, num); if (num == 0) { - PMD_DRV_LOG(ERR, "No PF queues are configured to enable RSS"); + PMD_DRV_LOG(ERR, + "No PF queues are configured to enable RSS for port %u", + pf->dev_data->port_id); return -ENOTSUP; } @@ -12854,29 +13281,198 @@ i40e_config_rss_filter(struct i40e_pf *pf, I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut); } - if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) { - i40e_pf_disable_rss(pf); - return 0; + i40e_rss_mark_invalid_rule(pf, conf); + + return 0; +} + +/* Configure RSS hash function to default */ +static int +i40e_rss_clear_hash_function(struct i40e_pf *pf, + struct i40e_rte_flow_rss_conf *conf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + uint32_t i, reg; + uint64_t mask0; + uint16_t j; + + if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) { + reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL); + if (reg & I40E_GLQF_CTL_HTOEP_MASK) { + PMD_DRV_LOG(DEBUG, + "Hash function already set to Toeplitz"); + I40E_WRITE_FLUSH(hw); + + return 0; + } + reg |= I40E_GLQF_CTL_HTOEP_MASK; + + i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg); + I40E_WRITE_FLUSH(hw); + } else if (conf->conf.func == + RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) { + mask0 = conf->conf.types & pf->adapter->flow_types_mask; + + for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) { + if (mask0 & (1UL << i)) + break; + } + + if (i == UINT64_BIT) + return -EINVAL; + + for (j = I40E_FILTER_PCTYPE_INVALID + 1; + j < I40E_FILTER_PCTYPE_MAX; j++) { + if (pf->adapter->pctypes_tbl[i] & (1ULL << j)) + i40e_write_global_rx_ctl(hw, + I40E_GLQF_HSYM(j), + 0); + } } - if (rss_conf.rss_key == NULL || rss_conf.rss_key_len < - (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) { - /* Random default keys */ - static uint32_t rss_key_default[] = {0x6b793944, - 0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8, - 0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605, - 0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581}; - rss_conf.rss_key = (uint8_t 
*)rss_key_default; - rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) * - sizeof(uint32_t); - PMD_DRV_LOG(INFO, - "No valid RSS key config for i40e, using default\n"); + return 0; +} + +/* Disable RSS hash and configure default input set */ +static int +i40e_rss_disable_hash(struct i40e_pf *pf, + struct i40e_rte_flow_rss_conf *conf) +{ + struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info; + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct i40e_rte_flow_rss_conf rss_conf; + uint32_t i; + + memset(&rss_conf, 0, sizeof(rss_conf)); + rte_memcpy(&rss_conf, conf, sizeof(rss_conf)); + + /* Disable RSS hash */ + rss_conf.conf.types = rss_info->conf.types & ~(conf->conf.types); + i40e_rss_hash_set(pf, &rss_conf); + + for (i = RTE_ETH_FLOW_IPV4; i <= RTE_ETH_FLOW_L2_PAYLOAD; i++) { + if (!(pf->adapter->flow_types_mask & (1ULL << i)) || + !(conf->conf.types & (1ULL << i))) + continue; + + /* Configure default input set */ + struct rte_eth_input_set_conf input_conf = { + .op = RTE_ETH_INPUT_SET_SELECT, + .flow_type = i, + .inset_size = 1, + }; + input_conf.field[0] = RTE_ETH_INPUT_SET_DEFAULT; + i40e_hash_filter_inset_select(hw, &input_conf); } - i40e_hw_rss_hash_set(pf, &rss_conf); + rss_info->conf.types = rss_conf.conf.types; - if (i40e_rss_conf_init(rss_info, &conf->conf)) - return -EINVAL; + i40e_rss_clear_hash_function(pf, conf); + + return 0; +} + +/* Configure RSS queue region to default */ +static int +i40e_rss_clear_queue_region(struct i40e_pf *pf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info; + uint16_t queue[I40E_MAX_Q_PER_TC]; + uint32_t num_rxq, i; + uint32_t lut = 0; + uint16_t j, num; + + num_rxq = RTE_MIN(pf->dev_data->nb_rx_queues, I40E_MAX_Q_PER_TC); + + for (j = 0; j < num_rxq; j++) + queue[j] = j; + + /* If both VMDQ and RSS enabled, not all of PF queues are configured. + * It's necessary to calculate the actual PF queues that are configured. 
+ */ + if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) + num = i40e_pf_calc_configured_queues_num(pf); + else + num = pf->dev_data->nb_rx_queues; + + num = RTE_MIN(num, num_rxq); + PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are configured", + num); + + if (num == 0) { + PMD_DRV_LOG(ERR, + "No PF queues are configured to enable RSS for port %u", + pf->dev_data->port_id); + return -ENOTSUP; + } + + /* Fill in redirection table */ + for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) { + if (j == num) + j = 0; + lut = (lut << 8) | (queue[j] & ((0x1 << + hw->func_caps.rss_table_entry_width) - 1)); + if ((i & 3) == 3) + I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut); + } + + rss_info->conf.queue_num = 0; + memset(&rss_info->conf.queue, 0, sizeof(uint16_t)); + + return 0; +} + +int +i40e_config_rss_filter(struct i40e_pf *pf, + struct i40e_rte_flow_rss_conf *conf, bool add) +{ + struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info; + struct rte_flow_action_rss update_conf = rss_info->conf; + int ret = 0; + + if (add) { + if (conf->conf.queue_num) { + /* Configure RSS queue region */ + ret = i40e_rss_config_queue_region(pf, conf); + if (ret) + return ret; + + update_conf.queue_num = conf->conf.queue_num; + update_conf.queue = conf->conf.queue; + } else if (conf->conf.func == + RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) { + /* Configure hash function */ + ret = i40e_rss_config_hash_function(pf, conf); + if (ret) + return ret; + + update_conf.func = conf->conf.func; + } else { + /* Configure hash enable and input set */ + ret = i40e_rss_enable_hash(pf, conf); + if (ret) + return ret; + + update_conf.types |= conf->conf.types; + update_conf.key = conf->conf.key; + update_conf.key_len = conf->conf.key_len; + } + + /* Update RSS info in pf */ + if (i40e_rss_conf_init(rss_info, &update_conf)) + return -EINVAL; + } else { + if (!conf->valid) + return 0; + + if (conf->conf.queue_num) + i40e_rss_clear_queue_region(pf); + else if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) + i40e_rss_clear_hash_function(pf, conf); + else + i40e_rss_disable_hash(pf, conf); + } return 0; }
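
The reworked i40e_config_rss_filter() above is driven by rte_flow RSS actions, which the PMD now tracks per rule in pf->rss_config_list instead of a single pf->rss_info snapshot. A minimal application-side usage sketch, assuming a port that is already configured and started (the port ID, pattern, and hash type choices are illustrative):

#include <rte_ethdev.h>
#include <rte_flow.h>

/* Create an RSS rule hashing non-fragmented IPv4/TCP flows on port_id. */
static struct rte_flow *
create_ipv4_tcp_rss_rule(uint16_t port_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_rss rss = {
		.func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
		.level = 0,
		.types = ETH_RSS_NONFRAG_IPV4_TCP,
		/* key/queue left NULL/0: the PMD falls back to its
		 * default key and the existing redirection table.
		 */
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}

Destroying the returned flow handle takes the !add branch of i40e_config_rss_filter() above, which now clears only the hash configuration that this particular rule enabled, rather than disabling RSS wholesale.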
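
Separately, the first hunk of this patch wires .tx_done_cleanup into i40e_eth_dev_ops, which lets applications reclaim mbufs from completed TX descriptors through the generic ethdev API without transmitting more packets. A short usage sketch (port and queue IDs are illustrative):

#include <stdio.h>
#include <rte_ethdev.h>

/* Free mbufs whose TX descriptors the NIC has already completed. */
static void
reclaim_tx_mbufs(uint16_t port_id, uint16_t queue_id)
{
	/* free_cnt == 0 requests freeing as many done mbufs as possible. */
	int nb = rte_eth_tx_done_cleanup(port_id, queue_id, 0);

	if (nb < 0)
		printf("tx_done_cleanup on port %u queue %u failed: %d\n",
		       port_id, queue_id, nb);
}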