item, "Not supported by ntuple filter");
return -rte_errno;
}
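+ /* ntuple filters compare fields exactly: each mask must be all
+ * ones (match the field) or all zeroes (ignore it); partial
+ * prefix masks are rejected.
+ */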
+ if ((ipv4_mask->hdr.src_addr != 0 &&
+ ipv4_mask->hdr.src_addr != UINT32_MAX) ||
+ (ipv4_mask->hdr.dst_addr != 0 &&
+ ipv4_mask->hdr.dst_addr != UINT32_MAX) ||
+ (ipv4_mask->hdr.next_proto_id != 0 &&
+ ipv4_mask->hdr.next_proto_id != UINT8_MAX)) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
filter->src_ip_mask = ipv4_mask->hdr.src_addr;
item, "Not supported by ntuple filter");
return -rte_errno;
}
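+ /* TCP port masks must likewise be all ones or all zeroes. */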
+ if ((tcp_mask->hdr.src_port != 0 &&
+ tcp_mask->hdr.src_port != UINT16_MAX) ||
+ (tcp_mask->hdr.dst_port != 0 &&
+ tcp_mask->hdr.dst_port != UINT16_MAX)) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
filter->dst_port_mask = tcp_mask->hdr.dst_port;
filter->src_port_mask = tcp_mask->hdr.src_port;
item, "Not supported by ntuple filter");
return -rte_errno;
}
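+ /* UDP port masks follow the same all-ones-or-all-zeroes rule. */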
+ if ((udp_mask->hdr.src_port != 0 &&
+ udp_mask->hdr.src_port != UINT16_MAX) ||
+ (udp_mask->hdr.dst_port != 0 &&
+ udp_mask->hdr.dst_port != UINT16_MAX)) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
filter->dst_port_mask = udp_mask->hdr.dst_port;
filter->src_port_mask = udp_mask->hdr.src_port;
return -rte_errno;
}
+ /* not supported: transfer rules target the embedded switch,
+ * which ixgbe does not implement
+ */
+ if (attr->transfer) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
if (attr->priority > 0xFFFF) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
* Mask bits of destination MAC address must be full
* of 1 or full of 0.
*/
- if (!is_zero_ether_addr(ð_mask->src) ||
- (!is_zero_ether_addr(ð_mask->dst) &&
- !is_broadcast_ether_addr(ð_mask->dst))) {
+ if (!rte_is_zero_ether_addr(ð_mask->src) ||
+ (!rte_is_zero_ether_addr(ð_mask->dst) &&
+ !rte_is_broadcast_ether_addr(ð_mask->dst))) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Invalid ether address mask");
/* If mask bits of destination MAC address
* are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
*/
- if (is_broadcast_ether_addr(ð_mask->dst)) {
+ if (rte_is_broadcast_ether_addr(ð_mask->dst)) {
filter->mac_addr = eth_spec->dst;
filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
} else {
return -rte_errno;
}
+ /* Not supported */
+ if (attr->transfer) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
/* Not supported */
if (attr->priority) {
rte_flow_error_set(error, EINVAL,
return -rte_errno;
}
+ /* not supported */
+ if (attr->transfer) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
/* Support 2 priorities, the lowest or highest. */
if (!attr->priority) {
filter->hig_pri = 0;
return -rte_errno;
}
+ /* not supported */
+ if (attr->transfer) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
/* not supported */
if (attr->priority) {
memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
return -rte_errno;
}
+ /* not supported */
+ if (attr->transfer) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
/* not supported */
if (attr->priority) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
return -rte_errno;
}
} else {
- if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
/* Get the VxLAN info */
if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
rule->ixgbe_fdir.formatted.tunnel_type =
- RTE_FDIR_TUNNEL_TYPE_VXLAN;
+ IXGBE_FDIR_VXLAN_TUNNEL_TYPE;
/* Only care about VNI, others should be masked. */
if (!item->mask) {
rule->b_spec = TRUE;
vxlan_spec = item->spec;
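+ /* Copy the 3-byte VNI exactly as supplied; the hardware
+ * consumes it in this byte order, so no offset or swap
+ * is applied.
+ */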
rte_memcpy(((uint8_t *)
- &rule->ixgbe_fdir.formatted.tni_vni + 1),
+ &rule->ixgbe_fdir.formatted.tni_vni),
vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
- rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
- rule->ixgbe_fdir.formatted.tni_vni);
}
}
/* Get the NVGRE info */
if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
rule->ixgbe_fdir.formatted.tunnel_type =
- RTE_FDIR_TUNNEL_TYPE_NVGRE;
+ IXGBE_FDIR_NVGRE_TUNNEL_TYPE;
/**
* Only care about flags0, flags1, protocol and TNI,
/* tni is a 24-bits bit field */
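+ /* As with the VNI, the TNI bytes are used exactly as supplied. */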
rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
- rule->ixgbe_fdir.formatted.tni_vni <<= 8;
}
}
rss = (const struct rte_flow_action_rss *)act->conf;
- if (!rss || !rss->num) {
+ if (!rss || !rss->queue_num) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act,
return -rte_errno;
}
- for (n = 0; n < rss->num; n++) {
+ for (n = 0; n < rss->queue_num; n++) {
if (rss->queue[n] >= dev->data->nb_rx_queues) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
return -rte_errno;
}
}
- if (rss->rss_conf)
- rss_conf->rss_conf = *rss->rss_conf;
- else
- rss_conf->rss_conf.rss_hf = IXGBE_RSS_OFFLOAD_ALL;
- for (n = 0; n < rss->num; ++n)
- rss_conf->queue[n] = rss->queue[n];
- rss_conf->num = rss->num;
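+ /* Reject RSS settings that the hardware cannot honour before
+ * copying the action into rss_conf.
+ */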
+ if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "non-default RSS hash functions are not supported");
+ if (rss->level)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "a nonzero RSS encapsulation level is not supported");
+ if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "RSS hash key must be exactly 40 bytes");
+ if (rss->queue_num > RTE_DIM(rss_conf->queue))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "too many queues for RSS context");
+ if (ixgbe_rss_conf_init(rss_conf, rss))
+ return rte_flow_error_set
+ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "RSS context initialization failure");
/* check if the next not void item is END */
act = next_no_void_action(actions, act);
return -rte_errno;
}
+ /* not supported */
+ if (attr->transfer) {
+ memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
if (attr->priority > 0xFFFF) {
memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
rte_flow_error_set(error, EINVAL,
struct ixgbe_filter_info *filter_info =
IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
- if (filter_info->rss_info.num)
+ if (filter_info->rss_info.conf.queue_num)
ixgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE);
}
PMD_DRV_LOG(ERR, "failed to allocate memory");
goto out;
}
- rte_memcpy(&rss_filter_ptr->filter_info,
- &rss_conf,
- sizeof(struct ixgbe_rte_flow_rss_conf));
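+ /* Deep-copy the key and queue arrays so the stored rule does
+ * not point into caller-owned memory.
+ */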
+ ixgbe_rss_conf_init(&rss_filter_ptr->filter_info,
+ &rss_conf.conf);
TAILQ_INSERT_TAIL(&filter_rss_list,
rss_filter_ptr, entries);
flow->rule = rss_filter_ptr;
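
For reference, the validation above is sized against ixgbe_rss_conf_init(), which copies the flattened rte_flow_action_rss into driver-owned storage. A minimal sketch of such a helper, assuming ixgbe_rte_flow_rss_conf embeds a fixed 40-byte key array and a queue array alongside its conf field (as the RTE_DIM() checks above imply), with memcpy from <string.h>:

int
ixgbe_rss_conf_init(struct ixgbe_rte_flow_rss_conf *out,
		    const struct rte_flow_action_rss *in)
{
	/* Refuse anything the fixed-size storage cannot hold. */
	if (in->key_len > RTE_DIM(out->key) ||
	    in->queue_num > RTE_DIM(out->queue))
		return -EINVAL;
	/* Copy the action, repointing key and queue at local storage
	 * so the rule no longer references caller memory. */
	out->conf = (struct rte_flow_action_rss){
		.func = in->func,
		.level = in->level,
		.types = in->types,
		.key_len = in->key_len,
		.queue_num = in->queue_num,
		.key = in->key_len ?
		       memcpy(out->key, in->key, in->key_len) : NULL,
		.queue = in->queue_num ?
			 memcpy(out->queue, in->queue,
				sizeof(*in->queue) * in->queue_num) : NULL,
	};
	return 0;
}

With a helper of this shape, both the validation path (rss_conf) and the creation path (rss_filter_ptr->filter_info) share one copy routine, which is why the final hunk replaces the shallow rte_memcpy().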