X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fixgbe%2Fixgbe_flow.c;h=23aba0a47139eba532a478136232b560a9c8c239;hb=35b2d13fd6fdcbd191f2a30d74648faeb1186c65;hp=00d975b93d2b2571da85cc2d4fa75676b4f5a3df;hpb=929e3319342b7ae2ec0c0fc5c5a4e954037d7d29;p=dpdk.git

diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index 00d975b93d..23aba0a471 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -363,6 +363,17 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 			item, "Not supported by ntuple filter");
 		return -rte_errno;
 	}
+	if ((ipv4_mask->hdr.src_addr != 0 &&
+		ipv4_mask->hdr.src_addr != UINT32_MAX) ||
+		(ipv4_mask->hdr.dst_addr != 0 &&
+		ipv4_mask->hdr.dst_addr != UINT32_MAX) ||
+		(ipv4_mask->hdr.next_proto_id != UINT8_MAX &&
+		ipv4_mask->hdr.next_proto_id != 0)) {
+		rte_flow_error_set(error,
+			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+			item, "Not supported by ntuple filter");
+		return -rte_errno;
+	}
 
 	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
 	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
@@ -432,6 +443,15 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 			item, "Not supported by ntuple filter");
 		return -rte_errno;
 	}
+	if ((tcp_mask->hdr.src_port != 0 &&
+		tcp_mask->hdr.src_port != UINT16_MAX) ||
+		(tcp_mask->hdr.dst_port != 0 &&
+		tcp_mask->hdr.dst_port != UINT16_MAX)) {
+		rte_flow_error_set(error,
+			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+			item, "Not supported by ntuple filter");
+		return -rte_errno;
+	}
 
 	filter->dst_port_mask = tcp_mask->hdr.dst_port;
 	filter->src_port_mask = tcp_mask->hdr.src_port;
@@ -467,6 +487,15 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 			item, "Not supported by ntuple filter");
 		return -rte_errno;
 	}
+	if ((udp_mask->hdr.src_port != 0 &&
+		udp_mask->hdr.src_port != UINT16_MAX) ||
+		(udp_mask->hdr.dst_port != 0 &&
+		udp_mask->hdr.dst_port != UINT16_MAX)) {
+		rte_flow_error_set(error,
+			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+			item, "Not supported by ntuple filter");
+		return -rte_errno;
+	}
 
 	filter->dst_port_mask = udp_mask->hdr.dst_port;
 	filter->src_port_mask = udp_mask->hdr.src_port;
@@ -557,6 +586,15 @@ action:
 		return -rte_errno;
 	}
 
+	/* not supported */
+	if (attr->transfer) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+			attr, "No support for transfer.");
+		return -rte_errno;
+	}
+
 	if (attr->priority > 0xFFFF) {
 		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
 		rte_flow_error_set(error, EINVAL,
@@ -706,9 +744,9 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
 	 * Mask bits of destination MAC address must be full
 	 * of 1 or full of 0.
 	 */
-	if (!is_zero_ether_addr(&eth_mask->src) ||
-	    (!is_zero_ether_addr(&eth_mask->dst) &&
-	     !is_broadcast_ether_addr(&eth_mask->dst))) {
+	if (!rte_is_zero_ether_addr(&eth_mask->src) ||
+	    (!rte_is_zero_ether_addr(&eth_mask->dst) &&
+	     !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
 		rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ITEM,
 				item, "Invalid ether address mask");
@@ -725,7 +763,7 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
 	/* If mask bits of destination MAC address
 	 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
 	 */
-	if (is_broadcast_ether_addr(&eth_mask->dst)) {
+	if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
 		filter->mac_addr = eth_spec->dst;
 		filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
 	} else {
@@ -786,6 +824,14 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
+	/* Not supported */
+	if (attr->transfer) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+			attr, "No support for transfer.");
+		return -rte_errno;
+	}
+
 	/* Not supported */
 	if (attr->priority) {
 		rte_flow_error_set(error, EINVAL,
@@ -841,8 +887,8 @@ ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
 		return -rte_errno;
 	}
 
-	if (filter->ether_type == ETHER_TYPE_IPv4 ||
-	    filter->ether_type == ETHER_TYPE_IPv6) {
+	if (filter->ether_type == RTE_ETHER_TYPE_IPv4 ||
+	    filter->ether_type == RTE_ETHER_TYPE_IPv6) {
 		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1078,6 +1124,15 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
+	/* not supported */
+	if (attr->transfer) {
+		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+			attr, "No support for transfer.");
+		return -rte_errno;
+	}
+
 	/* Support 2 priorities, the lowest or highest. */
 	if (!attr->priority) {
 		filter->hig_pri = 0;
@@ -1249,6 +1304,15 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
 		return -rte_errno;
 	}
 
+	/* not supported */
+	if (attr->transfer) {
+		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+			attr, "No support for transfer.");
+		return -rte_errno;
+	}
+
 	/* not supported */
 	if (attr->priority) {
 		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
@@ -1353,6 +1417,15 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
+	/* not supported */
+	if (attr->transfer) {
+		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+			attr, "No support for transfer.");
+		return -rte_errno;
+	}
+
 	/* not supported */
 	if (attr->priority) {
 		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1632,7 +1705,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 			eth_spec = item->spec;
 
 			/* Get the dst MAC. */
-			for (j = 0; j < ETHER_ADDR_LEN; j++) {
+			for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
 				rule->ixgbe_fdir.formatted.inner_mac[j] =
 					eth_spec->dst.addr_bytes[j];
 			}
@@ -1661,7 +1734,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 			 * src MAC address must be masked,
 			 * and don't support dst MAC address mask.
 			 */
-			for (j = 0; j < ETHER_ADDR_LEN; j++) {
+			for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
 				if (eth_mask->src.addr_bytes[j] ||
 					eth_mask->dst.addr_bytes[j] != 0xFF) {
 					memset(rule, 0,
@@ -1695,7 +1768,8 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 				return -rte_errno;
 			}
 		} else {
-			if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+			if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+				item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
 				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
 				rte_flow_error_set(error, EINVAL,
 					RTE_FLOW_ERROR_TYPE_ITEM,
@@ -2393,7 +2467,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 	/* Get the VxLAN info */
 	if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
 		rule->ixgbe_fdir.formatted.tunnel_type =
-			RTE_FDIR_TUNNEL_TYPE_VXLAN;
+			IXGBE_FDIR_VXLAN_TUNNEL_TYPE;
 		/* Only care about VNI, others should be masked.
 		 */
 		if (!item->mask) {
@@ -2443,17 +2517,15 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 			rule->b_spec = TRUE;
 			vxlan_spec = item->spec;
 			rte_memcpy(((uint8_t *)
-				&rule->ixgbe_fdir.formatted.tni_vni + 1),
+				&rule->ixgbe_fdir.formatted.tni_vni),
 				vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
-			rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
-				rule->ixgbe_fdir.formatted.tni_vni);
 		}
 	}
 
 	/* Get the NVGRE info */
 	if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
 		rule->ixgbe_fdir.formatted.tunnel_type =
-			RTE_FDIR_TUNNEL_TYPE_NVGRE;
+			IXGBE_FDIR_NVGRE_TUNNEL_TYPE;
 
 		/**
 		 * Only care about flags0, flags1, protocol and TNI,
@@ -2543,7 +2615,6 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 			/* tni is a 24-bits bit field */
 			rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
 				nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
-			rule->ixgbe_fdir.formatted.tni_vni <<= 8;
 		}
 	}
 
@@ -2589,7 +2660,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 		}
 
 		/* src MAC address should be masked. */
-		for (j = 0; j < ETHER_ADDR_LEN; j++) {
+		for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
 			if (eth_mask->src.addr_bytes[j]) {
 				memset(rule, 0,
 					sizeof(struct ixgbe_fdir_rule));
@@ -2600,7 +2671,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 			}
 		}
 		rule->mask.mac_addr_byte_mask = 0;
-		for (j = 0; j < ETHER_ADDR_LEN; j++) {
+		for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
 			/* It's a per byte mask. */
 			if (eth_mask->dst.addr_bytes[j] == 0xFF) {
 				rule->mask.mac_addr_byte_mask |= 0x1 << j;
@@ -2621,7 +2692,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 			eth_spec = item->spec;
 
 			/* Get the dst MAC. */
-			for (j = 0; j < ETHER_ADDR_LEN; j++) {
+			for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
 				rule->ixgbe_fdir.formatted.inner_mac[j] =
 					eth_spec->dst.addr_bytes[j];
 			}
@@ -2783,6 +2854,10 @@ ixgbe_parse_rss_filter(struct rte_eth_dev *dev,
 		return rte_flow_error_set
 			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
 			 "non-default RSS hash functions are not supported");
+	if (rss->level)
+		return rte_flow_error_set
+			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+			 "a nonzero RSS encapsulation level is not supported");
 	if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
 		return rte_flow_error_set
 			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
@@ -2825,6 +2900,15 @@ ixgbe_parse_rss_filter(struct rte_eth_dev *dev,
 		return -rte_errno;
 	}
 
+	/* not supported */
+	if (attr->transfer) {
+		memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+			attr, "No support for transfer.");
+		return -rte_errno;
+	}
+
 	if (attr->priority > 0xFFFF) {
 		memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
 		rte_flow_error_set(error, EINVAL,
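
Taken together, the added checks tighten what ixgbe accepts through the generic rte_flow API: every parser now rejects the transfer attribute with RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, and the ntuple path requires IPv4 address, protocol, and L4 port masks to be either all zeroes or all ones. The sketch below is not part of this patch; it illustrates, from the application side, a TCP ntuple-style rule that satisfies the stricter mask checks. The helper name and the port, queue, and header values are assumptions chosen for the example.

#include <stdint.h>
#include <netinet/in.h>
#include <rte_byteorder.h>
#include <rte_flow.h>

/* Hypothetical helper: steer TCP traffic from 192.168.0.1 to port 80
 * into one Rx queue via rte_flow.
 */
static struct rte_flow *
ntuple_rule_example(uint16_t port_id, uint16_t rx_queue,
		    struct rte_flow_error *error)
{
	/* .transfer stays 0: with this patch, ixgbe rejects it with
	 * RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER.
	 */
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = { .hdr = {
		.src_addr = rte_cpu_to_be_32(0xC0A80001), /* 192.168.0.1 */
		.next_proto_id = IPPROTO_TCP,
	} };
	/* Per the new checks, each mask field must be all-0 or all-1. */
	struct rte_flow_item_ipv4 ip_mask = { .hdr = {
		.src_addr = UINT32_MAX,
		.next_proto_id = UINT8_MAX,
	} };
	struct rte_flow_item_tcp tcp_spec = { .hdr = {
		.dst_port = rte_cpu_to_be_16(80),
	} };
	struct rte_flow_item_tcp tcp_mask = { .hdr = {
		.dst_port = UINT16_MAX, /* partial port masks are rejected */
	} };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
		  .spec = &tcp_spec, .mask = &tcp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = rx_queue };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* Nonzero means the PMD refused the rule, e.g. EINVAL for
	 * attr.transfer = 1 or for a partial address/port mask.
	 */
	if (rte_flow_validate(port_id, &attr, pattern, actions, error))
		return NULL;
	return rte_flow_create(port_id, &attr, pattern, actions, error);
}

With this patch applied, setting attr.transfer = 1 or using a partial mask such as a /24 on src_addr makes rte_flow_validate() fail with EINVAL instead of the rule reaching the hardware programming path.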