/* initialize mirror rule list */
TAILQ_INIT(&pf->mirror_list);
+ /* initialize RSS rule list */
+ TAILQ_INIT(&pf->rss_config_list);
+
/* initialize Traffic Manager configuration */
i40e_tm_conf_init(dev);
/* initialize queue region configuration */
i40e_init_queue_region_conf(dev);
- /* initialize rss configuration from rte_flow */
+ /* initialize RSS configuration from rte_flow */
memset(&pf->rss_info, 0,
sizeof(struct i40e_rte_flow_rss_conf));
}
}
-/* Restore rss filter */
+/* Restore RSS filter */
static inline void
i40e_rss_filter_restore(struct i40e_pf *pf)
{
- struct i40e_rte_flow_rss_conf *conf =
- &pf->rss_info;
- if (conf->conf.queue_num)
- i40e_config_rss_filter(pf, conf, TRUE);
+ struct i40e_rss_conf_list *list = &pf->rss_config_list;
+ struct i40e_rss_filter *filter;
+
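+ /* Reprogram every stored rte_flow RSS rule into the hardware */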
+ TAILQ_FOREACH(filter, list, next) {
+ i40e_config_rss_filter(pf, &filter->rss_filter_info, TRUE);
+ }
}
static void
return 0;
}
-int
-i40e_action_rss_same(const struct rte_flow_action_rss *comp,
- const struct rte_flow_action_rss *with)
+/* Set the RSS key and write the HENA registers to enable hashing */
+static int
+i40e_rss_hash_set(struct i40e_pf *pf, struct i40e_rte_flow_rss_conf *rss_conf)
{
- return (comp->func == with->func &&
- comp->level == with->level &&
- comp->types == with->types &&
- comp->key_len == with->key_len &&
- comp->queue_num == with->queue_num &&
- !memcmp(comp->key, with->key, with->key_len) &&
- !memcmp(comp->queue, with->queue,
- sizeof(*with->queue) * with->queue_num));
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint8_t *key = (void *)(uintptr_t)rss_conf->conf.key;
+ uint64_t hena;
+ int ret;
+
+ ret = i40e_set_rss_key(pf->main_vsi, key,
+ rss_conf->conf.key_len);
+ if (ret)
+ return ret;
+
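+ /* Translate the RSS offload types into pctype bits and program both 32-bit halves of HENA */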
+ hena = i40e_config_hena(pf->adapter, rss_conf->conf.types);
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
+ I40E_WRITE_FLUSH(hw);
+
+ return 0;
}
-int
-i40e_config_rss_filter(struct i40e_pf *pf,
- struct i40e_rte_flow_rss_conf *conf, bool add)
+/* Configure hash input set */
+static int
+i40e_rss_conf_hash_inset(struct i40e_pf *pf, uint64_t types)
{
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
- uint32_t i, lut = 0;
- uint16_t j, num;
- struct rte_eth_rss_conf rss_conf = {
- .rss_key = conf->conf.key_len ?
- (void *)(uintptr_t)conf->conf.key : NULL,
- .rss_key_len = conf->conf.key_len,
- .rss_hf = conf->conf.types,
+ struct rte_eth_input_set_conf conf;
+ uint64_t mask0;
+ int ret = 0;
+ uint32_t j;
+ int i;
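+
+ /* Map each RSS type combined with an L3/L4 SRC/DST_ONLY flag to the
+ * corresponding hardware input-set field; RTE_ETH_INPUT_SET_UNKNOWN marks
+ * combinations the hardware cannot hash on (e.g. L4 ports of IP fragments).
+ */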
+ static const struct {
+ uint64_t type;
+ enum rte_eth_input_set_field field;
+ } inset_match_table[] = {
+ {ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_SRC_ONLY,
+ RTE_ETH_INPUT_SET_L3_SRC_IP4},
+ {ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_DST_ONLY,
+ RTE_ETH_INPUT_SET_L3_DST_IP4},
+ {ETH_RSS_FRAG_IPV4 | ETH_RSS_L4_SRC_ONLY,
+ RTE_ETH_INPUT_SET_UNKNOWN},
+ {ETH_RSS_FRAG_IPV4 | ETH_RSS_L4_DST_ONLY,
+ RTE_ETH_INPUT_SET_UNKNOWN},
+
+ {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_SRC_ONLY,
+ RTE_ETH_INPUT_SET_L3_SRC_IP4},
+ {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_DST_ONLY,
+ RTE_ETH_INPUT_SET_L3_DST_IP4},
+ {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_SRC_ONLY,
+ RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT},
+ {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_DST_ONLY,
+ RTE_ETH_INPUT_SET_L4_TCP_DST_PORT},
+
+ {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_SRC_ONLY,
+ RTE_ETH_INPUT_SET_L3_SRC_IP4},
+ {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_DST_ONLY,
+ RTE_ETH_INPUT_SET_L3_DST_IP4},
+ {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_SRC_ONLY,
+ RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT},
+ {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_DST_ONLY,
+ RTE_ETH_INPUT_SET_L4_UDP_DST_PORT},
+
+ {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_SRC_ONLY,
+ RTE_ETH_INPUT_SET_L3_SRC_IP4},
+ {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_DST_ONLY,
+ RTE_ETH_INPUT_SET_L3_DST_IP4},
+ {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_SRC_ONLY,
+ RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT},
+ {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_DST_ONLY,
+ RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT},
+
+ {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_SRC_ONLY,
+ RTE_ETH_INPUT_SET_L3_SRC_IP4},
+ {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_DST_ONLY,
+ RTE_ETH_INPUT_SET_L3_DST_IP4},
+ {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L4_SRC_ONLY,
+ RTE_ETH_INPUT_SET_UNKNOWN},
+ {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L4_DST_ONLY,
+ RTE_ETH_INPUT_SET_UNKNOWN},
+
+ {ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_SRC_ONLY,
+ RTE_ETH_INPUT_SET_L3_SRC_IP6},
+ {ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_DST_ONLY,
+ RTE_ETH_INPUT_SET_L3_DST_IP6},
+ {ETH_RSS_FRAG_IPV6 | ETH_RSS_L4_SRC_ONLY,
+ RTE_ETH_INPUT_SET_UNKNOWN},
+ {ETH_RSS_FRAG_IPV6 | ETH_RSS_L4_DST_ONLY,
+ RTE_ETH_INPUT_SET_UNKNOWN},
+
+ {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_SRC_ONLY,
+ RTE_ETH_INPUT_SET_L3_SRC_IP6},
+ {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_DST_ONLY,
+ RTE_ETH_INPUT_SET_L3_DST_IP6},
+ {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_SRC_ONLY,
+ RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT},
+ {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_DST_ONLY,
+ RTE_ETH_INPUT_SET_L4_TCP_DST_PORT},
+
+ {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_SRC_ONLY,
+ RTE_ETH_INPUT_SET_L3_SRC_IP6},
+ {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_DST_ONLY,
+ RTE_ETH_INPUT_SET_L3_DST_IP6},
+ {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_SRC_ONLY,
+ RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT},
+ {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_DST_ONLY,
+ RTE_ETH_INPUT_SET_L4_UDP_DST_PORT},
+
+ {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_SRC_ONLY,
+ RTE_ETH_INPUT_SET_L3_SRC_IP6},
+ {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_DST_ONLY,
+ RTE_ETH_INPUT_SET_L3_DST_IP6},
+ {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_SRC_ONLY,
+ RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT},
+ {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_DST_ONLY,
+ RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT},
+
+ {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_SRC_ONLY,
+ RTE_ETH_INPUT_SET_L3_SRC_IP6},
+ {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_DST_ONLY,
+ RTE_ETH_INPUT_SET_L3_DST_IP6},
+ {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L4_SRC_ONLY,
+ RTE_ETH_INPUT_SET_UNKNOWN},
+ {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L4_DST_ONLY,
+ RTE_ETH_INPUT_SET_UNKNOWN},
};
- struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
- if (!add) {
- if (i40e_action_rss_same(&rss_info->conf, &conf->conf)) {
- i40e_pf_disable_rss(pf);
- memset(rss_info, 0,
- sizeof(struct i40e_rte_flow_rss_conf));
+ mask0 = types & pf->adapter->flow_types_mask;
+ conf.op = RTE_ETH_INPUT_SET_SELECT;
+ conf.inset_size = 0;
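+ /* Use the first flow type selected by the RSS types for the input-set selection */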
+ for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < RTE_ETH_FLOW_MAX; i++) {
+ if (mask0 & (1ULL << i)) {
+ conf.flow_type = i;
+ break;
+ }
+ }
+
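+ /* Collect the input-set fields requested by the SRC/DST_ONLY flags; unsupported combinations are rejected */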
+ for (j = 0; j < RTE_DIM(inset_match_table); j++) {
+ if ((types & inset_match_table[j].type) ==
+ inset_match_table[j].type) {
+ if (inset_match_table[j].field ==
+ RTE_ETH_INPUT_SET_UNKNOWN)
+ return -EINVAL;
+
+ conf.field[conf.inset_size] =
+ inset_match_table[j].field;
+ conf.inset_size++;
+ }
+ }
+
+ if (conf.inset_size) {
+ ret = i40e_hash_filter_inset_select(hw, &conf);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+/* Look up the conflicting rules and mark them as invalid */
+static void
+i40e_rss_mark_invalid_rule(struct i40e_pf *pf,
+ struct i40e_rte_flow_rss_conf *conf)
+{
+ struct i40e_rss_filter *rss_item;
+ uint64_t rss_inset;
+
+ /* Clear input set bits before comparing the pctype */
+ rss_inset = ~(ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY |
+ ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);
+
+ /* Look up the conflicting rules and mark them as invalid */
+ TAILQ_FOREACH(rss_item, &pf->rss_config_list, next) {
+ if (!rss_item->rss_filter_info.valid)
+ continue;
+
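+ /* The new rule overrides an existing rule when both configure a queue
+ * region, both hash on the same pctype, or both select the simple XOR
+ * hash function.
+ */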
+ if (conf->conf.queue_num &&
+ rss_item->rss_filter_info.conf.queue_num)
+ rss_item->rss_filter_info.valid = false;
+
+ if (conf->conf.types &&
+ (rss_item->rss_filter_info.conf.types &
+ rss_inset) ==
+ (conf->conf.types & rss_inset))
+ rss_item->rss_filter_info.valid = false;
+
+ if (conf->conf.func ==
+ RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
+ rss_item->rss_filter_info.conf.func ==
+ RTE_ETH_HASH_FUNCTION_SIMPLE_XOR)
+ rss_item->rss_filter_info.valid = false;
+ }
+}
+
+/* Configure RSS hash function */
+static int
+i40e_rss_config_hash_function(struct i40e_pf *pf,
+ struct i40e_rte_flow_rss_conf *conf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint32_t reg, i;
+ uint64_t mask0;
+ uint16_t j;
+
+ if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
+ reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
+ if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
+ PMD_DRV_LOG(DEBUG, "Hash function already set to Simple XOR");
+ I40E_WRITE_FLUSH(hw);
+ i40e_rss_mark_invalid_rule(pf, conf);
+
return 0;
}
+ reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
+
+ i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
+ I40E_WRITE_FLUSH(hw);
+ i40e_rss_mark_invalid_rule(pf, conf);
+ } else if (conf->conf.func ==
+ RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) {
+ mask0 = conf->conf.types & pf->adapter->flow_types_mask;
+
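+ /* Enable symmetric hashing port-wide, then per pctype of the matched flow type */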
+ i40e_set_symmetric_hash_enable_per_port(hw, 1);
+ for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) {
+ if (mask0 & (1ULL << i))
+ break;
+ }
+
+ for (j = I40E_FILTER_PCTYPE_INVALID + 1;
+ j < I40E_FILTER_PCTYPE_MAX; j++) {
+ if (pf->adapter->pctypes_tbl[i] & (1ULL << j))
+ i40e_write_global_rx_ctl(hw,
+ I40E_GLQF_HSYM(j),
+ I40E_GLQF_HSYM_SYMH_ENA_MASK);
+ }
+ }
+
+ return 0;
+}
+
+/* Enable RSS according to the configuration */
+static int
+i40e_rss_enable_hash(struct i40e_pf *pf,
+ struct i40e_rte_flow_rss_conf *conf)
+{
+ struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
+ struct i40e_rte_flow_rss_conf rss_conf;
+
+ if (!(conf->conf.types & pf->adapter->flow_types_mask))
+ return -ENOTSUP;
+
+ memset(&rss_conf, 0, sizeof(rss_conf));
+ rte_memcpy(&rss_conf, conf, sizeof(rss_conf));
+
+ /* Configure hash input set */
+ if (i40e_rss_conf_hash_inset(pf, conf->conf.types))
return -EINVAL;
+
+ if (rss_conf.conf.key == NULL || rss_conf.conf.key_len <
+ (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
+ /* Random default keys */
+ static uint32_t rss_key_default[] = {0x6b793944,
+ 0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
+ 0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
+ 0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
+
+ rss_conf.conf.key = (uint8_t *)rss_key_default;
+ rss_conf.conf.key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
+ sizeof(uint32_t);
+ PMD_DRV_LOG(INFO,
+ "No valid RSS key config for i40e, using default\n");
}
+ rss_conf.conf.types |= rss_info->conf.types;
+ i40e_rss_hash_set(pf, &rss_conf);
+
+ if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ)
+ i40e_rss_config_hash_function(pf, conf);
+
+ i40e_rss_mark_invalid_rule(pf, conf);
+
+ return 0;
+}
+
+/* Configure RSS queue region */
+static int
+i40e_rss_config_queue_region(struct i40e_pf *pf,
+ struct i40e_rte_flow_rss_conf *conf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint32_t lut = 0;
+ uint16_t j, num;
+ uint32_t i;
+
/* If both VMDQ and RSS enabled, not all of PF queues are configured.
* It's necessary to calculate the actual PF queues that are configured.
*/
I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
}
- if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
- i40e_pf_disable_rss(pf);
- return 0;
+ i40e_rss_mark_invalid_rule(pf, conf);
+
+ return 0;
+}
+
+/* Configure RSS hash function to default */
+static int
+i40e_rss_clear_hash_function(struct i40e_pf *pf,
+ struct i40e_rte_flow_rss_conf *conf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint32_t i, reg;
+ uint64_t mask0;
+ uint16_t j;
+
+ if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
+ reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
+ if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
+ PMD_DRV_LOG(DEBUG,
+ "Hash function already set to Toeplitz");
+ I40E_WRITE_FLUSH(hw);
+
+ return 0;
+ }
+ reg |= I40E_GLQF_CTL_HTOEP_MASK;
+
+ i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
+ I40E_WRITE_FLUSH(hw);
+ } else if (conf->conf.func ==
+ RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) {
+ mask0 = conf->conf.types & pf->adapter->flow_types_mask;
+
+ for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) {
+ if (mask0 & (1ULL << i))
+ break;
+ }
+
+ for (j = I40E_FILTER_PCTYPE_INVALID + 1;
+ j < I40E_FILTER_PCTYPE_MAX; j++) {
+ if (pf->adapter->pctypes_tbl[i] & (1ULL << j))
+ i40e_write_global_rx_ctl(hw,
+ I40E_GLQF_HSYM(j),
+ 0);
+ }
}
- if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
- (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
- /* Random default keys */
- static uint32_t rss_key_default[] = {0x6b793944,
- 0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
- 0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
- 0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
- rss_conf.rss_key = (uint8_t *)rss_key_default;
- rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
- sizeof(uint32_t);
- PMD_DRV_LOG(INFO,
- "No valid RSS key config for i40e, using default\n");
+ return 0;
+}
+
+/* Disable RSS hash and configure default input set */
+static int
+i40e_rss_disable_hash(struct i40e_pf *pf,
+ struct i40e_rte_flow_rss_conf *conf)
+{
+ struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_rte_flow_rss_conf rss_conf;
+ uint32_t i;
+
+ memset(&rss_conf, 0, sizeof(rss_conf));
+ rte_memcpy(&rss_conf, conf, sizeof(rss_conf));
+
+ /* Disable RSS hash */
+ rss_conf.conf.types = rss_info->conf.types & ~(conf->conf.types);
+ i40e_rss_hash_set(pf, &rss_conf);
+
+ for (i = RTE_ETH_FLOW_IPV4; i <= RTE_ETH_FLOW_L2_PAYLOAD; i++) {
+ if (!(pf->adapter->flow_types_mask & (1ULL << i)) ||
+ !(conf->conf.types & (1ULL << i)))
+ continue;
+
+ /* Configure default input set */
+ struct rte_eth_input_set_conf input_conf = {
+ .op = RTE_ETH_INPUT_SET_SELECT,
+ .flow_type = i,
+ .inset_size = 1,
+ };
+ input_conf.field[0] = RTE_ETH_INPUT_SET_DEFAULT;
+ i40e_hash_filter_inset_select(hw, &input_conf);
}
- i40e_hw_rss_hash_set(pf, &rss_conf);
+ rss_info->conf.types = rss_conf.conf.types;
- if (i40e_rss_conf_init(rss_info, &conf->conf))
- return -EINVAL;
+ i40e_rss_clear_hash_function(pf, conf);
+
+ return 0;
+}
+
+/* Configure RSS queue region to default */
+static int
+i40e_rss_clear_queue_region(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
+ uint16_t queue[I40E_MAX_Q_PER_TC];
+ uint32_t num_rxq, i;
+ uint32_t lut = 0;
+ uint16_t j, num;
+
+ num_rxq = RTE_MIN(pf->dev_data->nb_rx_queues, I40E_MAX_Q_PER_TC);
+
+ for (j = 0; j < num_rxq; j++)
+ queue[j] = j;
+
+ /* If both VMDQ and RSS enabled, not all of PF queues are configured.
+ * It's necessary to calculate the actual PF queues that are configured.
+ */
+ if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+ num = i40e_pf_calc_configured_queues_num(pf);
+ else
+ num = pf->dev_data->nb_rx_queues;
+
+ num = RTE_MIN(num, num_rxq);
+ PMD_DRV_LOG(INFO, "A maximum of %u contiguous PF queues are configured",
+ num);
+
+ if (num == 0) {
+ PMD_DRV_LOG(ERR,
+ "No PF queues are configured to enable RSS for port %u",
+ pf->dev_data->port_id);
+ return -ENOTSUP;
+ }
+
+ /* Fill in redirection table */
+ for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
+ if (j == num)
+ j = 0;
+ lut = (lut << 8) | (queue[j] & ((0x1 <<
+ hw->func_caps.rss_table_entry_width) - 1));
+ if ((i & 3) == 3)
+ I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
+ }
+
+ rss_info->conf.queue_num = 0;
+ rss_info->conf.queue = NULL;
+
+ return 0;
+}
+
+int
+i40e_config_rss_filter(struct i40e_pf *pf,
+ struct i40e_rte_flow_rss_conf *conf, bool add)
+{
+ struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
+ struct rte_flow_action_rss update_conf = rss_info->conf;
+ int ret = 0;
+
+ if (add) {
+ if (conf->conf.queue_num) {
+ /* Configure RSS queue region */
+ ret = i40e_rss_config_queue_region(pf, conf);
+ if (ret)
+ return ret;
+
+ update_conf.queue_num = conf->conf.queue_num;
+ update_conf.queue = conf->conf.queue;
+ } else if (conf->conf.func ==
+ RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
+ /* Configure hash function */
+ ret = i40e_rss_config_hash_function(pf, conf);
+ if (ret)
+ return ret;
+
+ update_conf.func = conf->conf.func;
+ } else {
+ /* Configure hash enable and input set */
+ ret = i40e_rss_enable_hash(pf, conf);
+ if (ret)
+ return ret;
+
+ update_conf.types |= conf->conf.types;
+ update_conf.key = conf->conf.key;
+ update_conf.key_len = conf->conf.key_len;
+ }
+
+ /* Update RSS info in pf */
+ if (i40e_rss_conf_init(rss_info, &update_conf))
+ return -EINVAL;
+ } else {
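+ /* Rules already invalidated by a later overlapping rule need no hardware cleanup */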
+ if (!conf->valid)
+ return 0;
+
+ if (conf->conf.queue_num)
+ i40e_rss_clear_queue_region(pf);
+ else if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR)
+ i40e_rss_clear_hash_function(pf, conf);
+ else
+ i40e_rss_disable_hash(pf, conf);
+ }
return 0;
}
* function for RSS, or flowtype for queue region configuration.
* For example:
* pattern:
- * Case 1: only ETH, indicate flowtype for queue region will be parsed.
- * Case 2: only VLAN, indicate user_priority for queue region will be parsed.
- * Case 3: none, indicate RSS related will be parsed in action.
- * Any pattern other the ETH or VLAN will be treated as invalid except END.
+ * Case 1: try to transform the pattern into a pctype; a valid pctype will
+ * be used when parsing the action.
+ * Case 2: only ETH, indicating that the flowtype for a queue region will be parsed.
+ * Case 3: only VLAN, indicating that the user_priority for a queue region will be parsed.
 * So, the pattern choice depends on the purpose of the configuration of
 * that flow.
* action:
- * action RSS will be uaed to transmit valid parameter with
+ * action RSS will be used to transmit valid parameter with
 * struct rte_flow_action_rss for all 3 cases.
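+ * For illustration only, a testpmd-style rule for Case 1 might look like the
+ * following sketch (exact tokens depend on the DPDK/testpmd version):
+ *   flow create 0 ingress pattern eth / ipv4 / udp / end
+ *     actions rss types ipv4-udp end queues end / end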
*/
static int
i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
const struct rte_flow_item *pattern,
struct rte_flow_error *error,
- uint8_t *action_flag,
+ struct i40e_rss_pattern_info *p_info,
struct i40e_queue_regions *info)
{
const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
const struct rte_flow_item *item = pattern;
enum rte_flow_item_type item_type;
-
- if (item->type == RTE_FLOW_ITEM_TYPE_END)
+ struct rte_flow_item *items;
+ uint32_t item_num = 0; /* number of non-void items in the pattern */
+ uint32_t i = 0;
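+ /* Reuse the flow director pattern arrays to map the item pattern to RSS offload types */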
+ static const struct {
+ enum rte_flow_item_type *item_array;
+ uint64_t type;
+ } i40e_rss_pctype_patterns[] = {
+ { pattern_fdir_ipv4,
+ ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER },
+ { pattern_fdir_ipv4_tcp, ETH_RSS_NONFRAG_IPV4_TCP },
+ { pattern_fdir_ipv4_udp, ETH_RSS_NONFRAG_IPV4_UDP },
+ { pattern_fdir_ipv4_sctp, ETH_RSS_NONFRAG_IPV4_SCTP },
+ { pattern_fdir_ipv6,
+ ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER },
+ { pattern_fdir_ipv6_tcp, ETH_RSS_NONFRAG_IPV6_TCP },
+ { pattern_fdir_ipv6_udp, ETH_RSS_NONFRAG_IPV6_UDP },
+ { pattern_fdir_ipv6_sctp, ETH_RSS_NONFRAG_IPV6_SCTP },
+ };
+
+ p_info->types = I40E_RSS_TYPE_INVALID;
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_END) {
+ p_info->types = I40E_RSS_TYPE_NONE;
return 0;
+ }
+
+ /* Convert pattern to RSS offload types */
+ while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
+ if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
+ item_num++;
+ i++;
+ }
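+ /* Reserve one extra slot for the trailing END item */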
+ item_num++;
+
+ items = rte_zmalloc("i40e_pattern",
+ item_num * sizeof(struct rte_flow_item), 0);
+ if (!items) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "No memory for PMD internal items.");
+ return -ENOMEM;
+ }
+
+ i40e_pattern_skip_void_item(items, pattern);
+
+ for (i = 0; i < RTE_DIM(i40e_rss_pctype_patterns); i++) {
+ if (i40e_match_pattern(i40e_rss_pctype_patterns[i].item_array,
+ items)) {
+ p_info->types = i40e_rss_pctype_patterns[i].type;
+ rte_free(items);
+ return 0;
+ }
+ }
+
+ rte_free(items);
for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
if (item->last) {
item_type = item->type;
switch (item_type) {
case RTE_FLOW_ITEM_TYPE_ETH:
- *action_flag = 1;
+ p_info->action_flag = 1;
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
vlan_spec = item->spec;
vlan_spec->tci) >> 13) & 0x7;
info->region[0].user_priority_num = 1;
info->queue_region_number = 1;
- *action_flag = 0;
+ p_info->action_flag = 0;
}
}
break;
}
/**
- * This function is used to parse rss queue index, total queue number and
+ * This function is used to parse RSS queue index, total queue number and
 * hash functions. If the purpose of this configuration is queue region
 * configuration, it will set the queue_region_conf flag to TRUE, else to FALSE.
 * In queue region configuration, it also needs to parse the hardware flowtype
* be any of the following values: 1, 2, 4, 8, 16, 32, 64, the
* hw_flowtype or PCTYPE max index should be 63, the user priority
* max index should be 7, and so on. And also, queue index should be
- * continuous sequence and queue region index should be part of rss
+ * continuous sequence and queue region index should be part of RSS
* queue index for this port.
+ * For hash parameters, the pctype selected by the action must match the one
+ * derived from the pattern.
+ * Queue indices (queue region) can only be set when no RSS types are given.
*/
static int
i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
const struct rte_flow_action *actions,
struct rte_flow_error *error,
- uint8_t action_flag,
+ struct i40e_rss_pattern_info p_info,
struct i40e_queue_regions *conf_info,
union i40e_filter_t *filter)
{
struct i40e_rte_flow_rss_conf *rss_config =
&filter->rss_conf;
struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
- uint16_t i, j, n, tmp;
+ uint16_t i, j, n, tmp, nb_types;
uint32_t index = 0;
uint64_t hf_bit = 1;
rss = act->conf;
/**
- * rss only supports forwarding,
+ * RSS only supports forwarding,
 * check that the first non-void action is RSS.
*/
if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
return -rte_errno;
}
- if (action_flag) {
+ if (p_info.action_flag) {
for (n = 0; n < 64; n++) {
if (rss->types & (hf_bit << n)) {
conf_info->region[0].hw_flowtype[0] = n;
if (rss_config->queue_region_conf)
return 0;
- if (!rss || !rss->queue_num) {
+ if (!rss) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act,
- "no valid queues");
+ "invalid rule");
return -rte_errno;
}
}
}
- if (rss_info->conf.queue_num) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- act,
- "rss only allow one valid rule");
- return -rte_errno;
+ if (rss->queue_num && (p_info.types || rss->types))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "RSS types must be empty while configuring queue region");
+
+ /* Validate that the action RSS types and the pattern agree on the pctype */
+ if (!(rss->types & p_info.types) &&
+ (rss->types || p_info.types) && !rss->queue_num)
+ return rte_flow_error_set
+ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "invalid pctype");
+
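+ /* Only one pctype (flow type bit) may be selected per rule */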
+ nb_types = 0;
+ for (n = 0; n < RTE_ETH_FLOW_MAX; n++) {
+ if (rss->types & (hf_bit << n))
+ nb_types++;
+ if (nb_types > 1)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "multi pctype is not supported");
}
+ if (rss->func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
+ (p_info.types || rss->types || rss->queue_num))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "pattern, type and queues must be empty while"
+ " setting hash function as simple_xor");
+
+ if (rss->func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ &&
+ !(p_info.types && rss->types))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "pctype and queues can not be empty while"
+ " setting hash function as symmetric toeplitz");
+
/* Parse RSS related parameters from configuration */
- if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
+ if (rss->func >= RTE_ETH_HASH_FUNCTION_MAX ||
+ rss->func == RTE_ETH_HASH_FUNCTION_TOEPLITZ)
return rte_flow_error_set
(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
- "non-default RSS hash functions are not supported");
+ "RSS hash functions are not supported");
if (rss->level)
return rte_flow_error_set
(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
union i40e_filter_t *filter,
struct rte_flow_error *error)
{
- int ret;
+ struct i40e_rss_pattern_info p_info;
struct i40e_queue_regions info;
- uint8_t action_flag = 0;
+ int ret;
memset(&info, 0, sizeof(struct i40e_queue_regions));
+ memset(&p_info, 0, sizeof(struct i40e_rss_pattern_info));
ret = i40e_flow_parse_rss_pattern(dev, pattern,
- error, &action_flag, &info);
+ error, &p_info, &info);
if (ret)
return ret;
ret = i40e_flow_parse_rss_action(dev, actions, error,
- action_flag, &info, filter);
+ p_info, &info, filter);
if (ret)
return ret;
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_rss_filter *rss_filter;
int ret;
if (conf->queue_region_conf) {
ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
- conf->queue_region_conf = 0;
} else {
ret = i40e_config_rss_filter(pf, conf, 1);
}
- return ret;
+
+ if (ret)
+ return ret;
+
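+ /* Remember the rule so it can be restored after a reset and removed on flush */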
+ rss_filter = rte_zmalloc("i40e_rss_filter",
+ sizeof(*rss_filter), 0);
+ if (rss_filter == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+ return -ENOMEM;
+ }
+ rss_filter->rss_filter_info = *conf;
+ /* The newly created rule is always valid; any existing rule
+ * covered by the new rule will be marked invalid.
+ */
+ rss_filter->rss_filter_info.valid = true;
+
+ TAILQ_INSERT_TAIL(&pf->rss_config_list, rss_filter, next);
+
+ return 0;
}
static int
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_rss_filter *rss_filter;
+ void *temp;
- i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
+ if (conf->queue_region_conf)
+ i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
+ else
+ i40e_config_rss_filter(pf, conf, 0);
- i40e_config_rss_filter(pf, conf, 0);
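+ /* Remove the matching entry from the RSS rule list */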
+ TAILQ_FOREACH_SAFE(rss_filter, &pf->rss_config_list, next, temp) {
+ if (!memcmp(&rss_filter->rss_filter_info, conf,
+ sizeof(struct rte_flow_action_rss))) {
+ TAILQ_REMOVE(&pf->rss_config_list, rss_filter, next);
+ rte_free(rss_filter);
+ }
+ }
return 0;
}
&cons_filter.rss_conf);
if (ret)
goto free_flow;
- flow->rule = &pf->rss_info;
+ flow->rule = TAILQ_LAST(&pf->rss_config_list,
+ i40e_rss_conf_list);
break;
default:
goto free_flow;
break;
case RTE_ETH_FILTER_HASH:
ret = i40e_config_rss_filter_del(dev,
- (struct i40e_rte_flow_rss_conf *)flow->rule);
+ &((struct i40e_rss_filter *)flow->rule)->rss_filter_info);
break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
if (ret) {
rte_flow_error_set(error, -ret,
RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
- "Failed to flush rss flows.");
+ "Failed to flush RSS flows.");
return -rte_errno;
}
return ret;
}
-/* remove the rss filter */
+/* remove the RSS filter */
static int
i40e_flow_flush_rss_filter(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_flow *flow;
+ void *temp;
int32_t ret = -EINVAL;
ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
- if (rss_info->conf.queue_num)
- ret = i40e_config_rss_filter(pf, rss_info, FALSE);
+ /* Delete RSS flows in flow list. */
+ TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
+ if (flow->filter_type != RTE_ETH_FILTER_HASH)
+ continue;
+
+ if (flow->rule) {
+ ret = i40e_config_rss_filter_del(dev,
+ &((struct i40e_rss_filter *)flow->rule)->rss_filter_info);
+ if (ret)
+ return ret;
+ }
+ TAILQ_REMOVE(&pf->flow_list, flow, node);
+ rte_free(flow);
+ }
+
return ret;
}