item, "Not supported by ntuple filter");
return -rte_errno;
}
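+ /* Each field is matched exactly or ignored: the address and
+ * protocol masks must be all-ones or all-zero.
+ */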
+ if ((ipv4_mask->hdr.src_addr != 0 &&
+ ipv4_mask->hdr.src_addr != UINT32_MAX) ||
+ (ipv4_mask->hdr.dst_addr != 0 &&
+ ipv4_mask->hdr.dst_addr != UINT32_MAX) ||
+ (ipv4_mask->hdr.next_proto_id != 0 &&
+ ipv4_mask->hdr.next_proto_id != UINT8_MAX)) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
filter->src_ip_mask = ipv4_mask->hdr.src_addr;
item, "Not supported by ntuple filter");
return -rte_errno;
}
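+ /* The same all-or-nothing rule applies to the TCP port masks. */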
+ if ((tcp_mask->hdr.src_port != 0 &&
+ tcp_mask->hdr.src_port != UINT16_MAX) ||
+ (tcp_mask->hdr.dst_port != 0 &&
+ tcp_mask->hdr.dst_port != UINT16_MAX)) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
filter->dst_port_mask = tcp_mask->hdr.dst_port;
filter->src_port_mask = tcp_mask->hdr.src_port;
item, "Not supported by ntuple filter");
return -rte_errno;
}
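+ /* Likewise for the UDP port masks. */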
+ if ((udp_mask->hdr.src_port != 0 &&
+ udp_mask->hdr.src_port != UINT16_MAX) ||
+ (udp_mask->hdr.dst_port != 0 &&
+ udp_mask->hdr.dst_port != UINT16_MAX)) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
filter->dst_port_mask = udp_mask->hdr.dst_port;
filter->src_port_mask = udp_mask->hdr.src_port;
return -rte_errno;
}
+ /* not supported */
+ if (attr->transfer) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
if (attr->priority > 0xFFFF) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
* Mask bits of destination MAC address must be full
* of 1 or full of 0.
*/
- if (!is_zero_ether_addr(ð_mask->src) ||
- (!is_zero_ether_addr(ð_mask->dst) &&
- !is_broadcast_ether_addr(ð_mask->dst))) {
+ if (!rte_is_zero_ether_addr(ð_mask->src) ||
+ (!rte_is_zero_ether_addr(ð_mask->dst) &&
+ !rte_is_broadcast_ether_addr(ð_mask->dst))) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Invalid ether address mask");
/* If mask bits of destination MAC address
* are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
*/
- if (is_broadcast_ether_addr(ð_mask->dst)) {
+ if (rte_is_broadcast_ether_addr(ð_mask->dst)) {
filter->mac_addr = eth_spec->dst;
filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
} else {
return -rte_errno;
}
+ /* Not supported */
+ if (attr->transfer) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
/* Not supported */
if (attr->priority) {
rte_flow_error_set(error, EINVAL,
return -rte_errno;
}
- if (filter->ether_type == ETHER_TYPE_IPv4 ||
- filter->ether_type == ETHER_TYPE_IPv6) {
+ if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
+ filter->ether_type == RTE_ETHER_TYPE_IPV6) {
memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
tcp_spec = item->spec;
tcp_mask = item->mask;
- if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
+ if (!(tcp_spec->hdr.tcp_flags & RTE_TCP_SYN_FLAG) ||
tcp_mask->hdr.src_port ||
tcp_mask->hdr.dst_port ||
tcp_mask->hdr.sent_seq ||
tcp_mask->hdr.recv_ack ||
tcp_mask->hdr.data_off ||
- tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
+ tcp_mask->hdr.tcp_flags != RTE_TCP_SYN_FLAG ||
tcp_mask->hdr.rx_win ||
tcp_mask->hdr.cksum ||
tcp_mask->hdr.tcp_urp) {
return -rte_errno;
}
+ /* not supported */
+ if (attr->transfer) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
/* Support 2 priorities, the lowest or highest. */
if (!attr->priority) {
filter->hig_pri = 0;
return -rte_errno;
}
+ /* not supported */
+ if (attr->transfer) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
/* not supported */
if (attr->priority) {
memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
return -rte_errno;
}
+ /* not supported */
+ if (attr->transfer) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
/* not supported */
if (attr->priority) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
eth_spec = item->spec;
/* Get the dst MAC. */
- for (j = 0; j < ETHER_ADDR_LEN; j++) {
+ for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
rule->ixgbe_fdir.formatted.inner_mac[j] =
eth_spec->dst.addr_bytes[j];
}
* src MAC address mask must be all zero (the src address is
* ignored), and dst MAC address mask must be all 0xFF; a
* partial dst mask is not supported.
*/
- for (j = 0; j < ETHER_ADDR_LEN; j++) {
+ for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
if (eth_mask->src.addr_bytes[j] ||
eth_mask->dst.addr_bytes[j] != 0xFF) {
memset(rule, 0,
return -rte_errno;
}
} else {
- if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
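+ /* The next item may be IPv4 or VLAN; anything else is rejected. */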
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
/* Get the VxLAN info */
if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
rule->ixgbe_fdir.formatted.tunnel_type =
- RTE_FDIR_TUNNEL_TYPE_VXLAN;
+ IXGBE_FDIR_VXLAN_TUNNEL_TYPE;
/* Only the VNI is matched; all other VXLAN field masks must be zero. */
if (!item->mask) {
rule->b_spec = TRUE;
vxlan_spec = item->spec;
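+ /* Copy the VNI bytes to the start of tni_vni, keeping them
+ * in network byte order.
+ */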
rte_memcpy(((uint8_t *)
- &rule->ixgbe_fdir.formatted.tni_vni + 1),
+ &rule->ixgbe_fdir.formatted.tni_vni),
vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
- rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
- rule->ixgbe_fdir.formatted.tni_vni);
}
}
/* Get the NVGRE info */
if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
rule->ixgbe_fdir.formatted.tunnel_type =
- RTE_FDIR_TUNNEL_TYPE_NVGRE;
+ IXGBE_FDIR_NVGRE_TUNNEL_TYPE;
/**
* Only care about flags0, flags1, protocol and TNI,
/* tni is a 24-bits bit field */
rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
- rule->ixgbe_fdir.formatted.tni_vni <<= 8;
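+ /* The TNI bytes stay in network byte order, as copied. */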
}
}
}
/* The src MAC mask must be all zero (the src address is ignored). */
- for (j = 0; j < ETHER_ADDR_LEN; j++) {
+ for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
if (eth_mask->src.addr_bytes[j]) {
memset(rule, 0,
sizeof(struct ixgbe_fdir_rule));
}
}
rule->mask.mac_addr_byte_mask = 0;
- for (j = 0; j < ETHER_ADDR_LEN; j++) {
+ for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
/* It's a per byte mask. */
if (eth_mask->dst.addr_bytes[j] == 0xFF) {
rule->mask.mac_addr_byte_mask |= 0x1 << j;
eth_spec = item->spec;
/* Get the dst MAC. */
- for (j = 0; j < ETHER_ADDR_LEN; j++) {
+ for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
rule->ixgbe_fdir.formatted.inner_mac[j] =
eth_spec->dst.addr_bytes[j];
}
}
}
+ if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "non-default RSS hash functions are not supported");
+ if (rss->level)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "a nonzero RSS encapsulation level is not supported");
if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
return rte_flow_error_set
(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
/* check if the next not void item is END */
act = next_no_void_action(actions, act);
if (act->type != RTE_FLOW_ACTION_TYPE_END) {
- memset(rss_conf, 0, sizeof(struct rte_eth_rss_conf));
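+ /* rss_conf is an ixgbe_rte_flow_rss_conf; clear its full size. */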
+ memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act, "Not supported action.");
return -rte_errno;
}
+ /* not supported */
+ if (attr->transfer) {
+ memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
if (attr->priority > 0xFFFF) {
memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
rte_flow_error_set(error, EINVAL,