#include <unistd.h>
#include <stdarg.h>
+#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_log.h>
struct rte_flow_error *error,
struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
const struct rte_flow_item *pattern,
struct rte_flow_error *error,
struct i40e_fdir_filter_conf *filter);
flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) |
(raw_id << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
(layer_idx * I40E_MAX_FLXPLD_FIED);
- I40E_WRITE_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
- i40e_global_cfg_warning(I40E_WARNING_ENA_FLX_PLD);
+ I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
}
/* Set flex pit */
*/
static int
i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
const struct rte_flow_item *pattern,
struct rte_flow_error *error,
struct i40e_fdir_filter_conf *filter)
"Invalid MAC_addr mask.");
return -rte_errno;
}
+ }
+ if (eth_spec && eth_mask && eth_mask->type) {
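+ /**
+ * Peek at the next item: when a VLAN item follows, the ETH type field
+ * carries the TPID, so matching on it here is rejected and the ether
+ * type is taken from the VLAN item's inner_type instead.
+ */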
+ enum rte_flow_item_type next = (item + 1)->type;
- if ((eth_mask->type & UINT16_MAX) ==
- UINT16_MAX) {
- input_set |= I40E_INSET_LAST_ETHER_TYPE;
- filter->input.flow.l2_flow.ether_type =
- eth_spec->type;
+ if (eth_mask->type != RTE_BE16(0xffff)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid type mask.");
+ return -rte_errno;
}
ether_type = rte_be_to_cpu_16(eth_spec->type);
- if (ether_type == ETHER_TYPE_IPv4 ||
+
+ if (next == RTE_FLOW_ITEM_TYPE_VLAN ||
+ ether_type == ETHER_TYPE_IPv4 ||
ether_type == ETHER_TYPE_IPv6 ||
ether_type == ETHER_TYPE_ARP ||
ether_type == outer_tpid) {
"Unsupported ether_type.");
return -rte_errno;
}
+ input_set |= I40E_INSET_LAST_ETHER_TYPE;
+ filter->input.flow.l2_flow.ether_type =
+ eth_spec->type;
}
pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
case RTE_FLOW_ITEM_TYPE_VLAN:
vlan_spec = item->spec;
vlan_mask = item->mask;
+
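+ /* A preceding ETH item cannot have set the ether type already. */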
+ RTE_ASSERT(!(input_set & I40E_INSET_LAST_ETHER_TYPE));
if (vlan_spec && vlan_mask) {
if (vlan_mask->tci ==
rte_cpu_to_be_16(I40E_TCI_MASK)) {
vlan_spec->tci;
}
}
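+ /* Match on the ether type carried by the VLAN item (inner_type). */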
+ if (vlan_spec && vlan_mask && vlan_mask->inner_type) {
+ if (vlan_mask->inner_type != RTE_BE16(0xffff)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid inner_type"
+ " mask.");
+ return -rte_errno;
+ }
+
+ ether_type =
+ rte_be_to_cpu_16(vlan_spec->inner_type);
+
+ if (ether_type == ETHER_TYPE_IPv4 ||
+ ether_type == ETHER_TYPE_IPv6 ||
+ ether_type == ETHER_TYPE_ARP ||
+ ether_type == outer_tpid) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Unsupported inner_type.");
+ return -rte_errno;
+ }
+ input_set |= I40E_INSET_LAST_ETHER_TYPE;
+ filter->input.flow.l2_flow.ether_type =
+ vlan_spec->inner_type;
+ }
pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
layer_idx = I40E_FLXPLD_L2_IDX;
break;
case RTE_FLOW_ITEM_TYPE_VF:
vf_spec = item->spec;
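+ /* The VF item is only supported together with the transfer attribute. */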
+ if (!attr->transfer) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Matching VF traffic"
+ " without affecting it"
+ " (transfer attribute)"
+ " is unsupported");
+ return -rte_errno;
+ }
filter->input.flow_ext.is_vf = 1;
filter->input.flow_ext.dst_id = vf_spec->id;
if (filter->input.flow_ext.is_vf &&
struct rte_flow_error *error,
union i40e_filter_t *filter)
{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_fdir_filter_conf *fdir_filter =
&filter->fdir_filter;
int ret;
- ret = i40e_flow_parse_fdir_pattern(dev, pattern, error, fdir_filter);
+ ret = i40e_flow_parse_fdir_pattern(dev, attr, pattern, error,
+ fdir_filter);
if (ret)
return ret;
if (dev->data->dev_conf.fdir_conf.mode !=
RTE_FDIR_MODE_PERFECT) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "Check the mode in fdir_conf.");
- return -rte_errno;
+ /* Enable fdir when fdir flow is added at first time. */
+ ret = i40e_fdir_setup(pf);
+ if (ret != I40E_SUCCESS) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failed to setup fdir.");
+ return -rte_errno;
+ }
+ ret = i40e_fdir_configure(dev);
+ if (ret < 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failed to configure fdir.");
+ goto err;
+ }
+
+ dev->data->dev_conf.fdir_conf.mode = RTE_FDIR_MODE_PERFECT;
}
return 0;
+err:
+ i40e_fdir_teardown(pf);
+ return -rte_errno;
}
/* Parse to get the action info of a tunnel filter
case RTE_FLOW_ITEM_TYPE_VLAN:
vlan_spec = item->spec;
vlan_mask = item->mask;
- if (!(vlan_spec && vlan_mask)) {
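+ /* A spec and mask are mandatory, and matching on inner_type is not supported here. */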
+ if (!(vlan_spec && vlan_mask) ||
+ vlan_mask->inner_type) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
case RTE_FLOW_ITEM_TYPE_VLAN:
vlan_spec = item->spec;
vlan_mask = item->mask;
- if (!(vlan_spec && vlan_mask)) {
+ if (!(vlan_spec && vlan_mask) ||
+ vlan_mask->inner_type) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
vlan_spec = item->spec;
vlan_mask = item->mask;
- if (!(vlan_spec && vlan_mask)) {
+ if (!(vlan_spec && vlan_mask) ||
+ vlan_mask->inner_type) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
return 0;
}
+/**
+ * This function parses the RSS queue indexes, the total queue number and
+ * the hash functions. If the purpose of this configuration is queue region
+ * configuration, it sets the queue_region_conf flag to TRUE, otherwise to
+ * FALSE. For queue region configuration it also parses the hardware
+ * flowtype and user_priority from the configuration and checks the
+ * validity of these parameters. For example, the queue region sizes should
+ * be one of the following values: 1, 2, 4, 8, 16, 32, 64; the maximum
+ * hw_flowtype or PCTYPE index is 63; the maximum user priority index is 7;
+ * and so on. In addition, the queue indexes should form a continuous
+ * sequence and the queue region indexes should be part of the RSS queue
+ * indexes of this port.
+ */
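+/*
+ * Illustrative example: a queue list of 8,9,10,11 (four queues, a power of
+ * two, contiguous and part of the port's RSS queues) can form a queue
+ * region, whereas 8,10,11,12 would be rejected because the indexes are not
+ * contiguous.
+ */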
static int
i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
const struct rte_flow_action *actions,
if (action_flag) {
for (n = 0; n < 64; n++) {
- if (rss->rss_conf->rss_hf & (hf_bit << n)) {
+ if (rss->types & (hf_bit << n)) {
conf_info->region[0].hw_flowtype[0] = n;
conf_info->region[0].flowtype_num = 1;
conf_info->queue_region_number = 1;
}
}
+ /**
+ * Check the queue region related parameters to make sure
+ * that the queue indexes for the queue region form a
+ * continuous sequence and are part of the RSS queue
+ * indexes of this port.
+ */
if (conf_info->queue_region_number) {
- for (i = 0; i < rss->num; i++) {
- for (j = 0; j < rss_info->num; j++) {
- if (rss->queue[i] == rss_info->queue[j])
+ for (i = 0; i < rss->queue_num; i++) {
+ for (j = 0; j < rss_info->conf.queue_num; j++) {
+ if (rss->queue[i] == rss_info->conf.queue[j])
break;
}
- if (j == rss_info->num) {
+ if (j == rss_info->conf.queue_num) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act,
}
}
- for (i = 0; i < rss->num - 1; i++) {
+ for (i = 0; i < rss->queue_num - 1; i++) {
if (rss->queue[i + 1] != rss->queue[i] + 1) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
}
}
+ /* Parse queue region related parameters from configuration */
for (n = 0; n < conf_info->queue_region_number; n++) {
if (conf_info->region[n].user_priority_num ||
conf_info->region[n].flowtype_num) {
- if (!((rte_is_power_of_2(rss->num)) &&
- rss->num <= 64)) {
- PMD_DRV_LOG(ERR, "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
- "total number of queues do not exceed the VSI allocation");
+ if (!((rte_is_power_of_2(rss->queue_num)) &&
+ rss->queue_num <= 64)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
+ "total number of queues do not exceed the VSI allocation");
return -rte_errno;
}
if (conf_info->region[n].user_priority[n] >=
I40E_MAX_USER_PRIORITY) {
- PMD_DRV_LOG(ERR, "the user priority max index is 7");
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "the user priority max index is 7");
return -rte_errno;
}
if (conf_info->region[n].hw_flowtype[n] >=
I40E_FILTER_PCTYPE_MAX) {
- PMD_DRV_LOG(ERR, "the hw_flowtype or PCTYPE max index is 63");
- return -rte_errno;
- }
-
- if (rss_info->num < rss->num ||
- rss->queue[0] < rss_info->queue[0] ||
- (rss->queue[0] + rss->num >
- rss_info->num + rss_info->queue[0])) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act,
- "no valid queues");
+ "the hw_flowtype or PCTYPE max index is 63");
return -rte_errno;
}
for (i = 0; i < info->queue_region_number; i++) {
- if (info->region[i].queue_num == rss->num &&
+ if (info->region[i].queue_num ==
+ rss->queue_num &&
info->region[i].queue_start_index ==
rss->queue[0])
break;
if (i == info->queue_region_number) {
if (i > I40E_REGION_MAX_INDEX) {
- PMD_DRV_LOG(ERR, "the queue region max index is 7");
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "the queue region max index is 7");
return -rte_errno;
}
info->region[i].queue_num =
- rss->num;
+ rss->queue_num;
info->region[i].queue_start_index =
rss->queue[0];
info->region[i].region_id =
rss_config->queue_region_conf = TRUE;
}
+ /**
+ * Return immediately if this flow is used for queue region configuration.
+ */
if (rss_config->queue_region_conf)
return 0;
- if (!rss || !rss->num) {
+ if (!rss || !rss->queue_num) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act,
return -rte_errno;
}
- for (n = 0; n < rss->num; n++) {
+ for (n = 0; n < rss->queue_num; n++) {
if (rss->queue[n] >= dev->data->nb_rx_queues) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
return -rte_errno;
}
}
- if (rss->rss_conf)
- rss_config->rss_conf = *rss->rss_conf;
- else
- rss_config->rss_conf.rss_hf =
- pf->adapter->flow_types_mask;
- for (n = 0; n < rss->num; ++n)
- rss_config->queue[n] = rss->queue[n];
- rss_config->num = rss->num;
+ /* Parse RSS related parameters from configuration */
+ if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "non-default RSS hash functions are not supported");
+ if (rss->level)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "a nonzero RSS encapsulation level is not supported");
+ if (rss->key_len && rss->key_len > RTE_DIM(rss_config->key))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "RSS hash key too large");
+ if (rss->queue_num > RTE_DIM(rss_config->queue))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "too many queues for RSS context");
+ if (i40e_rss_conf_init(rss_config, rss))
+ return rte_flow_error_set
+ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "RSS context initialization failure");
+
index++;
/* check if the next not void action is END */
case RTE_ETH_FILTER_FDIR:
ret = i40e_flow_add_del_fdir_filter(dev,
&((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
+
+ /* If the last flow is destroyed, disable fdir. */
+ if (!ret && TAILQ_EMPTY(&pf->fdir.fdir_list)) {
+ i40e_fdir_teardown(pf);
+ dev->data->dev_conf.fdir_conf.mode =
+ RTE_FDIR_MODE_NONE;
+ }
break;
case RTE_ETH_FILTER_HASH:
ret = i40e_config_rss_filter_del(dev,
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
struct i40e_vsi *vsi;
struct i40e_pf_vf *vf;
- struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
+ struct i40e_aqc_cloud_filters_element_bb cld_filter;
struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
struct i40e_tunnel_filter *node;
bool big_buffer = 0;
big_buffer = 1;
if (big_buffer)
- ret = i40e_aq_remove_cloud_filters_big_buffer(hw, vsi->seid,
- &cld_filter, 1);
+ ret = i40e_aq_rem_cloud_filters_bb(hw, vsi->seid,
+ &cld_filter, 1);
else
- ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
- &cld_filter.element, 1);
+ ret = i40e_aq_rem_cloud_filters(hw, vsi->seid,
+ &cld_filter.element, 1);
if (ret < 0)
return -ENOTSUP;
pf->fdir.inset_flag[pctype] = 0;
}
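+ /* All flow director filters are flushed; release the FDIR resources. */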
+ i40e_fdir_teardown(pf);
+
return ret;
}
ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
- if (rss_info->num)
+ if (rss_info->conf.queue_num)
ret = i40e_config_rss_filter(pf, rss_info, FALSE);
return ret;
}