diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index cb177f3232..23aba0a471 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- *   * Neither the name of Intel Corporation nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2016 Intel Corporation */ #include @@ -51,11 +22,10 @@ #include #include #include -#include #include #include #include -#include +#include #include #include #include @@ -79,6 +49,58 @@ #define IXGBE_MAX_N_TUPLE_PRIO 7 #define IXGBE_MAX_FLX_SOURCE_OFF 62 +/* ntuple filter list structure */ +struct ixgbe_ntuple_filter_ele { + TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries; + struct rte_eth_ntuple_filter filter_info; +}; +/* ethertype filter list structure */ +struct ixgbe_ethertype_filter_ele { + TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries; + struct rte_eth_ethertype_filter filter_info; +}; +/* syn filter list structure */ +struct ixgbe_eth_syn_filter_ele { + TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries; + struct rte_eth_syn_filter filter_info; +}; +/* fdir filter list structure */ +struct ixgbe_fdir_rule_ele { + TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries; + struct ixgbe_fdir_rule filter_info; +}; +/* l2_tunnel filter list structure */ +struct ixgbe_eth_l2_tunnel_conf_ele { + TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries; + struct rte_eth_l2_tunnel_conf filter_info; +}; +/* rss filter list structure */ +struct ixgbe_rss_conf_ele { + TAILQ_ENTRY(ixgbe_rss_conf_ele) entries; + struct ixgbe_rte_flow_rss_conf filter_info; +}; +/* ixgbe_flow memory list structure */ +struct ixgbe_flow_mem { + TAILQ_ENTRY(ixgbe_flow_mem) entries; + struct rte_flow *flow; +}; + +TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele); +TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele); +TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele); +TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele); +TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele); +TAILQ_HEAD(ixgbe_rss_filter_list, ixgbe_rss_conf_ele); +TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem); + +static struct ixgbe_ntuple_filter_list filter_ntuple_list; +static struct ixgbe_ethertype_filter_list filter_ethertype_list; +static struct ixgbe_syn_filter_list filter_syn_list; +static struct ixgbe_fdir_rule_filter_list filter_fdir_list; +static struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list; +static struct ixgbe_rss_filter_list filter_rss_list; +static struct ixgbe_flow_mem_list ixgbe_flow_list; + /** * Endless loop will never happen with below assumption * 1. there is at least one no-void item(END) @@ -142,6 +164,9 @@ const struct rte_flow_action *next_no_void_action( * END * other members in mask and spec should set to 0x00. * item->last should be NULL. + * + * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY. 
+ * */ static int cons_parse_ntuple_filter(const struct rte_flow_attr *attr, @@ -160,6 +185,12 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, const struct rte_flow_item_udp *udp_mask; const struct rte_flow_item_sctp *sctp_spec; const struct rte_flow_item_sctp *sctp_mask; + const struct rte_flow_item_eth *eth_spec; + const struct rte_flow_item_eth *eth_mask; + const struct rte_flow_item_vlan *vlan_spec; + const struct rte_flow_item_vlan *vlan_mask; + struct rte_flow_item_eth eth_null; + struct rte_flow_item_vlan vlan_null; if (!pattern) { rte_flow_error_set(error, @@ -181,6 +212,46 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, return -rte_errno; } + memset(ð_null, 0, sizeof(struct rte_flow_item_eth)); + memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan)); + +#ifdef RTE_LIBRTE_SECURITY + /** + * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY + */ + act = next_no_void_action(actions, NULL); + if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) { + const void *conf = act->conf; + /* check if the next not void item is END */ + act = next_no_void_action(actions, act); + if (act->type != RTE_FLOW_ACTION_TYPE_END) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + /* get the IP pattern*/ + item = next_no_void_pattern(pattern, NULL); + while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 && + item->type != RTE_FLOW_ITEM_TYPE_IPV6) { + if (item->last || + item->type == RTE_FLOW_ITEM_TYPE_END) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "IP pattern missing."); + return -rte_errno; + } + item = next_no_void_pattern(pattern, item); + } + + filter->proto = IPPROTO_ESP; + return ixgbe_crypto_add_ingress_sa_from_flow(conf, item->spec, + item->type == RTE_FLOW_ITEM_TYPE_IPV6); + } +#endif + /* the first not void item can be MAC or IPv4 */ item = next_no_void_pattern(pattern, NULL); @@ -193,6 +264,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, } /* Skip Ethernet */ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) { + eth_spec = item->spec; + eth_mask = item->mask; /*Not supported last point for range*/ if (item->last) { rte_flow_error_set(error, @@ -203,15 +276,20 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, } /* if the first item is MAC, the content should be NULL */ - if (item->spec || item->mask) { + if ((item->spec || item->mask) && + (memcmp(eth_spec, ð_null, + sizeof(struct rte_flow_item_eth)) || + memcmp(eth_mask, ð_null, + sizeof(struct rte_flow_item_eth)))) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "Not supported by ntuple filter"); return -rte_errno; } - /* check if the next not void item is IPv4 */ + /* check if the next not void item is IPv4 or Vlan */ item = next_no_void_pattern(pattern, item); - if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) { + if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 && + item->type != RTE_FLOW_ITEM_TYPE_VLAN) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "Not supported by ntuple filter"); @@ -219,54 +297,100 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, } } - /* get the IPv4 info */ - if (!item->spec || !item->mask) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, "Invalid ntuple mask"); - return -rte_errno; - } - /*Not supported last point for range*/ - if (item->last) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - item, "Not 
supported last point for range"); - return -rte_errno; + if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) { + vlan_spec = item->spec; + vlan_mask = item->mask; + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + /* the content should be NULL */ + if ((item->spec || item->mask) && + (memcmp(vlan_spec, &vlan_null, + sizeof(struct rte_flow_item_vlan)) || + memcmp(vlan_mask, &vlan_null, + sizeof(struct rte_flow_item_vlan)))) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + /* check if the next not void item is IPv4 */ + item = next_no_void_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) { + rte_flow_error_set(error, + EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } } - ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask; - /** - * Only support src & dst addresses, protocol, - * others should be masked. - */ - if (ipv4_mask->hdr.version_ihl || - ipv4_mask->hdr.type_of_service || - ipv4_mask->hdr.total_length || - ipv4_mask->hdr.packet_id || - ipv4_mask->hdr.fragment_offset || - ipv4_mask->hdr.time_to_live || - ipv4_mask->hdr.hdr_checksum) { + if (item->mask) { + /* get the IPv4 info */ + if (!item->spec || !item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid ntuple mask"); + return -rte_errno; + } + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + ipv4_mask = item->mask; + /** + * Only support src & dst addresses, protocol, + * others should be masked. 
+ */ + if (ipv4_mask->hdr.version_ihl || + ipv4_mask->hdr.type_of_service || + ipv4_mask->hdr.total_length || + ipv4_mask->hdr.packet_id || + ipv4_mask->hdr.fragment_offset || + ipv4_mask->hdr.time_to_live || + ipv4_mask->hdr.hdr_checksum) { rte_flow_error_set(error, - EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, - item, "Not supported by ntuple filter"); - return -rte_errno; - } + EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + if ((ipv4_mask->hdr.src_addr != 0 && + ipv4_mask->hdr.src_addr != UINT32_MAX) || + (ipv4_mask->hdr.dst_addr != 0 && + ipv4_mask->hdr.dst_addr != UINT32_MAX) || + (ipv4_mask->hdr.next_proto_id != UINT8_MAX && + ipv4_mask->hdr.next_proto_id != 0)) { + rte_flow_error_set(error, + EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } - filter->dst_ip_mask = ipv4_mask->hdr.dst_addr; - filter->src_ip_mask = ipv4_mask->hdr.src_addr; - filter->proto_mask = ipv4_mask->hdr.next_proto_id; + filter->dst_ip_mask = ipv4_mask->hdr.dst_addr; + filter->src_ip_mask = ipv4_mask->hdr.src_addr; + filter->proto_mask = ipv4_mask->hdr.next_proto_id; - ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec; - filter->dst_ip = ipv4_spec->hdr.dst_addr; - filter->src_ip = ipv4_spec->hdr.src_addr; - filter->proto = ipv4_spec->hdr.next_proto_id; + ipv4_spec = item->spec; + filter->dst_ip = ipv4_spec->hdr.dst_addr; + filter->src_ip = ipv4_spec->hdr.src_addr; + filter->proto = ipv4_spec->hdr.next_proto_id; + } /* check if the next not void item is TCP or UDP */ item = next_no_void_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_TCP && item->type != RTE_FLOW_ITEM_TYPE_UDP && - item->type != RTE_FLOW_ITEM_TYPE_SCTP) { + item->type != RTE_FLOW_ITEM_TYPE_SCTP && + item->type != RTE_FLOW_ITEM_TYPE_END) { memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -274,8 +398,14 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, return -rte_errno; } - /* get the TCP/UDP info */ - if (!item->spec || !item->mask) { + if ((item->type != RTE_FLOW_ITEM_TYPE_END) && + (!item->spec && !item->mask)) { + goto action; + } + + /* get the TCP/UDP/SCTP info */ + if (item->type != RTE_FLOW_ITEM_TYPE_END && + (!item->spec || !item->mask)) { memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -294,7 +424,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, } if (item->type == RTE_FLOW_ITEM_TYPE_TCP) { - tcp_mask = (const struct rte_flow_item_tcp *)item->mask; + tcp_mask = item->mask; /** * Only support src & dst ports, tcp flags, @@ -313,6 +443,15 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, item, "Not supported by ntuple filter"); return -rte_errno; } + if ((tcp_mask->hdr.src_port != 0 && + tcp_mask->hdr.src_port != UINT16_MAX) || + (tcp_mask->hdr.dst_port != 0 && + tcp_mask->hdr.dst_port != UINT16_MAX)) { + rte_flow_error_set(error, + EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } filter->dst_port_mask = tcp_mask->hdr.dst_port; filter->src_port_mask = tcp_mask->hdr.src_port; @@ -328,12 +467,12 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, return -rte_errno; } - tcp_spec = (const struct rte_flow_item_tcp *)item->spec; + tcp_spec = item->spec; filter->dst_port = tcp_spec->hdr.dst_port; filter->src_port = tcp_spec->hdr.src_port; filter->tcp_flags = tcp_spec->hdr.tcp_flags; } 
else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) { - udp_mask = (const struct rte_flow_item_udp *)item->mask; + udp_mask = item->mask; /** * Only support src & dst ports, @@ -348,15 +487,24 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, item, "Not supported by ntuple filter"); return -rte_errno; } + if ((udp_mask->hdr.src_port != 0 && + udp_mask->hdr.src_port != UINT16_MAX) || + (udp_mask->hdr.dst_port != 0 && + udp_mask->hdr.dst_port != UINT16_MAX)) { + rte_flow_error_set(error, + EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } filter->dst_port_mask = udp_mask->hdr.dst_port; filter->src_port_mask = udp_mask->hdr.src_port; - udp_spec = (const struct rte_flow_item_udp *)item->spec; + udp_spec = item->spec; filter->dst_port = udp_spec->hdr.dst_port; filter->src_port = udp_spec->hdr.src_port; - } else { - sctp_mask = (const struct rte_flow_item_sctp *)item->mask; + } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) { + sctp_mask = item->mask; /** * Only support src & dst ports, @@ -375,9 +523,11 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, filter->dst_port_mask = sctp_mask->hdr.dst_port; filter->src_port_mask = sctp_mask->hdr.src_port; - sctp_spec = (const struct rte_flow_item_sctp *)item->spec; + sctp_spec = item->spec; filter->dst_port = sctp_spec->hdr.dst_port; filter->src_port = sctp_spec->hdr.src_port; + } else { + goto action; } /* check if the next not void item is END */ @@ -390,6 +540,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, return -rte_errno; } +action: + /** * n-tuple only supports forwarding, * check if the first not void action is QUEUE. @@ -434,6 +586,15 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, return -rte_errno; } + /* not supported */ + if (attr->transfer) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + if (attr->priority > 0xFFFF) { memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); rte_flow_error_set(error, EINVAL, @@ -468,6 +629,12 @@ ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev, if (ret) return ret; +#ifdef RTE_LIBRTE_SECURITY + /* ESP flow not really a flow*/ + if (filter->proto == IPPROTO_ESP) + return 0; +#endif + /* Ixgbe doesn't support tcp flags. */ if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) { memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); @@ -487,9 +654,7 @@ ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev, return -rte_errno; } - if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM || - filter->priority > IXGBE_5TUPLE_MAX_PRI || - filter->priority < IXGBE_5TUPLE_MIN_PRI) + if (filter->queue >= dev->data->nb_rx_queues) return -rte_errno; /* fixed value for ixgbe */ @@ -572,16 +737,16 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr, return -rte_errno; } - eth_spec = (const struct rte_flow_item_eth *)item->spec; - eth_mask = (const struct rte_flow_item_eth *)item->mask; + eth_spec = item->spec; + eth_mask = item->mask; /* Mask bits of source MAC address must be full of 0. * Mask bits of destination MAC address must be full * of 1 or full of 0. 
*/ - if (!is_zero_ether_addr(ð_mask->src) || - (!is_zero_ether_addr(ð_mask->dst) && - !is_broadcast_ether_addr(ð_mask->dst))) { + if (!rte_is_zero_ether_addr(ð_mask->src) || + (!rte_is_zero_ether_addr(ð_mask->dst) && + !rte_is_broadcast_ether_addr(ð_mask->dst))) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "Invalid ether address mask"); @@ -598,7 +763,7 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr, /* If mask bits of destination MAC address * are full of 1, set RTE_ETHTYPE_FLAGS_MAC. */ - if (is_broadcast_ether_addr(ð_mask->dst)) { + if (rte_is_broadcast_ether_addr(ð_mask->dst)) { filter->mac_addr = eth_spec->dst; filter->flags |= RTE_ETHTYPE_FLAGS_MAC; } else { @@ -659,6 +824,14 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr, return -rte_errno; } + /* Not supported */ + if (attr->transfer) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + /* Not supported */ if (attr->priority) { rte_flow_error_set(error, EINVAL, @@ -706,7 +879,7 @@ ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev, return -rte_errno; } - if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) { + if (filter->queue >= dev->data->nb_rx_queues) { memset(filter, 0, sizeof(struct rte_eth_ethertype_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -714,8 +887,8 @@ ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev, return -rte_errno; } - if (filter->ether_type == ETHER_TYPE_IPv4 || - filter->ether_type == ETHER_TYPE_IPv6) { + if (filter->ether_type == RTE_ETHER_TYPE_IPv4 || + filter->ether_type == RTE_ETHER_TYPE_IPv6) { memset(filter, 0, sizeof(struct rte_eth_ethertype_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -873,8 +1046,8 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, return -rte_errno; } - tcp_spec = (const struct rte_flow_item_tcp *)item->spec; - tcp_mask = (const struct rte_flow_item_tcp *)item->mask; + tcp_spec = item->spec; + tcp_mask = item->mask; if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) || tcp_mask->hdr.src_port || tcp_mask->hdr.dst_port || @@ -951,6 +1124,15 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, return -rte_errno; } + /* not supported */ + if (attr->transfer) { + memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + /* Support 2 priorities, the lowest or highest. */ if (!attr->priority) { filter->hig_pri = 0; @@ -983,6 +1165,9 @@ ixgbe_parse_syn_filter(struct rte_eth_dev *dev, ret = cons_parse_syn_filter(attr, pattern, actions, filter, error); + if (filter->queue >= dev->data->nb_rx_queues) + return -rte_errno; + if (ret) return ret; @@ -997,7 +1182,7 @@ ixgbe_parse_syn_filter(struct rte_eth_dev *dev, * The first not void item can be E_TAG. * The next not void item must be END. * action: - * The first not void action should be QUEUE. + * The first not void action should be VF or PF. * The next not void action should be END. * pattern example: * ITEM Spec Mask @@ -1008,7 +1193,8 @@ ixgbe_parse_syn_filter(struct rte_eth_dev *dev, * item->last should be NULL. 
*/ static int -cons_parse_l2_tn_filter(const struct rte_flow_attr *attr, +cons_parse_l2_tn_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, const struct rte_flow_item pattern[], const struct rte_flow_action actions[], struct rte_eth_l2_tunnel_conf *filter, @@ -1018,7 +1204,8 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr, const struct rte_flow_item_e_tag *e_tag_spec; const struct rte_flow_item_e_tag *e_tag_mask; const struct rte_flow_action *act; - const struct rte_flow_action_queue *act_q; + const struct rte_flow_action_vf *act_vf; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); if (!pattern) { rte_flow_error_set(error, EINVAL, @@ -1066,8 +1253,8 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr, return -rte_errno; } - e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec; - e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask; + e_tag_spec = item->spec; + e_tag_mask = item->mask; /* Only care about GRP and E cid base. */ if (e_tag_mask->epcp_edei_in_ecid_b || @@ -1117,6 +1304,15 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr, return -rte_errno; } + /* not supported */ + if (attr->transfer) { + memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + /* not supported */ if (attr->priority) { memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); @@ -1126,9 +1322,10 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr, return -rte_errno; } - /* check if the first not void action is QUEUE. */ + /* check if the first not void action is VF or PF. */ act = next_no_void_action(actions, NULL); - if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) { + if (act->type != RTE_FLOW_ACTION_TYPE_VF && + act->type != RTE_FLOW_ACTION_TYPE_PF) { memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, @@ -1136,8 +1333,12 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr, return -rte_errno; } - act_q = (const struct rte_flow_action_queue *)act->conf; - filter->pool = act_q->index; + if (act->type == RTE_FLOW_ACTION_TYPE_VF) { + act_vf = (const struct rte_flow_action_vf *)act->conf; + filter->pool = act_vf->id; + } else { + filter->pool = pci_dev->max_vfs; + } /* check if the next not void item is END */ act = next_no_void_action(actions, act); @@ -1162,8 +1363,10 @@ ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev, { int ret = 0; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + uint16_t vf_num; - ret = cons_parse_l2_tn_filter(attr, pattern, + ret = cons_parse_l2_tn_filter(dev, attr, pattern, actions, l2_tn_filter, error); if (hw->mac.type != ixgbe_mac_X550 && @@ -1176,6 +1379,11 @@ ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev, return -rte_errno; } + vf_num = pci_dev->max_vfs; + + if (l2_tn_filter->pool > vf_num) + return -rte_errno; + return ret; } @@ -1209,6 +1417,15 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr, return -rte_errno; } + /* not supported */ + if (attr->transfer) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + /* not supported */ if (attr->priority) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); @@ -1303,12 +1520,9 @@ static inline uint8_t 
signature_match(const struct rte_flow_item pattern[]) break; if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) { - spec = - (const struct rte_flow_item_fuzzy *)item->spec; - last = - (const struct rte_flow_item_fuzzy *)item->last; - mask = - (const struct rte_flow_item_fuzzy *)item->mask; + spec = item->spec; + last = item->last; + mask = item->mask; if (!spec || !mask) return 0; @@ -1383,7 +1597,8 @@ static inline uint8_t signature_match(const struct rte_flow_item pattern[]) * Item->last should be NULL. */ static int -ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, +ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, const struct rte_flow_item pattern[], const struct rte_flow_action actions[], struct ixgbe_fdir_rule *rule, @@ -1406,9 +1621,10 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, const struct rte_flow_item_vlan *vlan_mask; const struct rte_flow_item_raw *raw_mask; const struct rte_flow_item_raw *raw_spec; - uint8_t j; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if (!pattern) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM, @@ -1486,10 +1702,10 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, if (item->spec) { rule->b_spec = TRUE; - eth_spec = (const struct rte_flow_item_eth *)item->spec; + eth_spec = item->spec; /* Get the dst MAC. */ - for (j = 0; j < ETHER_ADDR_LEN; j++) { + for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) { rule->ixgbe_fdir.formatted.inner_mac[j] = eth_spec->dst.addr_bytes[j]; } @@ -1499,7 +1715,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, if (item->mask) { rule->b_mask = TRUE; - eth_mask = (const struct rte_flow_item_eth *)item->mask; + eth_mask = item->mask; /* Ether type should be masked. */ if (eth_mask->type || @@ -1518,7 +1734,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, * src MAC address must be masked, * and don't support dst MAC address mask. 
*/ - for (j = 0; j < ETHER_ADDR_LEN; j++) { + for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) { if (eth_mask->src.addr_bytes[j] || eth_mask->dst.addr_bytes[j] != 0xFF) { memset(rule, 0, @@ -1552,7 +1768,8 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, return -rte_errno; } } else { - if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) { + if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 && + item->type != RTE_FLOW_ITEM_TYPE_VLAN) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -1579,8 +1796,8 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, return -rte_errno; } - vlan_spec = (const struct rte_flow_item_vlan *)item->spec; - vlan_mask = (const struct rte_flow_item_vlan *)item->mask; + vlan_spec = item->spec; + vlan_mask = item->mask; rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci; @@ -1626,8 +1843,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, return -rte_errno; } rule->b_mask = TRUE; - ipv4_mask = - (const struct rte_flow_item_ipv4 *)item->mask; + ipv4_mask = item->mask; if (ipv4_mask->hdr.version_ihl || ipv4_mask->hdr.type_of_service || ipv4_mask->hdr.total_length || @@ -1647,8 +1863,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, if (item->spec) { rule->b_spec = TRUE; - ipv4_spec = - (const struct rte_flow_item_ipv4 *)item->spec; + ipv4_spec = item->spec; rule->ixgbe_fdir.formatted.dst_ip[0] = ipv4_spec->hdr.dst_addr; rule->ixgbe_fdir.formatted.src_ip[0] = @@ -1698,8 +1913,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, } rule->b_mask = TRUE; - ipv6_mask = - (const struct rte_flow_item_ipv6 *)item->mask; + ipv6_mask = item->mask; if (ipv6_mask->hdr.vtc_flow || ipv6_mask->hdr.payload_len || ipv6_mask->hdr.proto || @@ -1739,8 +1953,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, if (item->spec) { rule->b_spec = TRUE; - ipv6_spec = - (const struct rte_flow_item_ipv6 *)item->spec; + ipv6_spec = item->spec; rte_memcpy(rule->ixgbe_fdir.formatted.src_ip, ipv6_spec->hdr.src_addr, 16); rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip, @@ -1792,7 +2005,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, return -rte_errno; } rule->b_mask = TRUE; - tcp_mask = (const struct rte_flow_item_tcp *)item->mask; + tcp_mask = item->mask; if (tcp_mask->hdr.sent_seq || tcp_mask->hdr.recv_ack || tcp_mask->hdr.data_off || @@ -1811,7 +2024,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, if (item->spec) { rule->b_spec = TRUE; - tcp_spec = (const struct rte_flow_item_tcp *)item->spec; + tcp_spec = item->spec; rule->ixgbe_fdir.formatted.src_port = tcp_spec->hdr.src_port; rule->ixgbe_fdir.formatted.dst_port = @@ -1857,7 +2070,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, return -rte_errno; } rule->b_mask = TRUE; - udp_mask = (const struct rte_flow_item_udp *)item->mask; + udp_mask = item->mask; if (udp_mask->hdr.dgram_len || udp_mask->hdr.dgram_cksum) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); @@ -1871,7 +2084,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, if (item->spec) { rule->b_spec = TRUE; - udp_spec = (const struct rte_flow_item_udp *)item->spec; + udp_spec = item->spec; rule->ixgbe_fdir.formatted.src_port = udp_spec->hdr.src_port; rule->ixgbe_fdir.formatted.dst_port = @@ -1905,44 +2118,62 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, item, "Not supported last point for range"); return -rte_errno; } - /** - * Only care about src 
& dst ports, - * others should be masked. - */ - if (!item->mask) { - memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, "Not supported by fdir filter"); - return -rte_errno; - } - rule->b_mask = TRUE; - sctp_mask = - (const struct rte_flow_item_sctp *)item->mask; - if (sctp_mask->hdr.tag || - sctp_mask->hdr.cksum) { - memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, "Not supported by fdir filter"); - return -rte_errno; - } - rule->mask.src_port_mask = sctp_mask->hdr.src_port; - rule->mask.dst_port_mask = sctp_mask->hdr.dst_port; - if (item->spec) { - rule->b_spec = TRUE; - sctp_spec = - (const struct rte_flow_item_sctp *)item->spec; - rule->ixgbe_fdir.formatted.src_port = - sctp_spec->hdr.src_port; - rule->ixgbe_fdir.formatted.dst_port = - sctp_spec->hdr.dst_port; + /* only x550 family only support sctp port */ + if (hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550EM_x || + hw->mac.type == ixgbe_mac_X550EM_a) { + /** + * Only care about src & dst ports, + * others should be masked. + */ + if (!item->mask) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + rule->b_mask = TRUE; + sctp_mask = item->mask; + if (sctp_mask->hdr.tag || + sctp_mask->hdr.cksum) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + rule->mask.src_port_mask = sctp_mask->hdr.src_port; + rule->mask.dst_port_mask = sctp_mask->hdr.dst_port; + + if (item->spec) { + rule->b_spec = TRUE; + sctp_spec = item->spec; + rule->ixgbe_fdir.formatted.src_port = + sctp_spec->hdr.src_port; + rule->ixgbe_fdir.formatted.dst_port = + sctp_spec->hdr.dst_port; + } + /* others even sctp port is not supported */ + } else { + sctp_mask = item->mask; + if (sctp_mask && + (sctp_mask->hdr.src_port || + sctp_mask->hdr.dst_port || + sctp_mask->hdr.tag || + sctp_mask->hdr.cksum)) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } } item = next_no_fuzzy_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_RAW && - item->type != RTE_FLOW_ITEM_TYPE_END) { + item->type != RTE_FLOW_ITEM_TYPE_END) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -1969,7 +2200,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, return -rte_errno; } - raw_mask = (const struct rte_flow_item_raw *)item->mask; + raw_mask = item->mask; /* check mask */ if (raw_mask->relative != 0x1 || @@ -1985,7 +2216,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, return -rte_errno; } - raw_spec = (const struct rte_flow_item_raw *)item->spec; + raw_spec = item->spec; /* check spec */ if (raw_spec->relative != 0 || @@ -2236,7 +2467,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, /* Get the VxLAN info */ if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) { rule->ixgbe_fdir.formatted.tunnel_type = - RTE_FDIR_TUNNEL_TYPE_VXLAN; + IXGBE_FDIR_VXLAN_TUNNEL_TYPE; /* Only care about VNI, others should be masked. 
*/ if (!item->mask) { @@ -2258,8 +2489,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, /* Tunnel type is always meaningful. */ rule->mask.tunnel_type_mask = 1; - vxlan_mask = - (const struct rte_flow_item_vxlan *)item->mask; + vxlan_mask = item->mask; if (vxlan_mask->flags) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, @@ -2285,20 +2515,17 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, if (item->spec) { rule->b_spec = TRUE; - vxlan_spec = (const struct rte_flow_item_vxlan *) - item->spec; + vxlan_spec = item->spec; rte_memcpy(((uint8_t *) - &rule->ixgbe_fdir.formatted.tni_vni + 1), + &rule->ixgbe_fdir.formatted.tni_vni), vxlan_spec->vni, RTE_DIM(vxlan_spec->vni)); - rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32( - rule->ixgbe_fdir.formatted.tni_vni); } } /* Get the NVGRE info */ if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) { rule->ixgbe_fdir.formatted.tunnel_type = - RTE_FDIR_TUNNEL_TYPE_NVGRE; + IXGBE_FDIR_NVGRE_TUNNEL_TYPE; /** * Only care about flags0, flags1, protocol and TNI, @@ -2323,8 +2550,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, /* Tunnel type is always meaningful. */ rule->mask.tunnel_type_mask = 1; - nvgre_mask = - (const struct rte_flow_item_nvgre *)item->mask; + nvgre_mask = item->mask; if (nvgre_mask->flow_id) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, @@ -2332,8 +2558,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, item, "Not supported by fdir filter"); return -rte_errno; } - if (nvgre_mask->c_k_s_rsvd0_ver != - rte_cpu_to_be_16(0x3000) || + if (nvgre_mask->protocol && nvgre_mask->protocol != 0xFFFF) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, @@ -2341,6 +2566,15 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, item, "Not supported by fdir filter"); return -rte_errno; } + if (nvgre_mask->c_k_s_rsvd0_ver && + nvgre_mask->c_k_s_rsvd0_ver != + rte_cpu_to_be_16(0xFFFF)) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } /* TNI must be totally masked or not. */ if (nvgre_mask->tni[0] && ((nvgre_mask->tni[0] != 0xFF) || @@ -2359,10 +2593,17 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, if (item->spec) { rule->b_spec = TRUE; - nvgre_spec = - (const struct rte_flow_item_nvgre *)item->spec; + nvgre_spec = item->spec; if (nvgre_spec->c_k_s_rsvd0_ver != - rte_cpu_to_be_16(0x2000) || + rte_cpu_to_be_16(0x2000) && + nvgre_mask->c_k_s_rsvd0_ver) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + if (nvgre_mask->protocol && nvgre_spec->protocol != rte_cpu_to_be_16(NVGRE_PROTOCOL)) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); @@ -2374,7 +2615,6 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, /* tni is a 24-bits bit field */ rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni, nvgre_spec->tni, RTE_DIM(nvgre_spec->tni)); - rule->ixgbe_fdir.formatted.tni_vni <<= 8; } } @@ -2408,7 +2648,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, return -rte_errno; } rule->b_mask = TRUE; - eth_mask = (const struct rte_flow_item_eth *)item->mask; + eth_mask = item->mask; /* Ether type should be masked. 
*/ if (eth_mask->type) { @@ -2420,7 +2660,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, } /* src MAC address should be masked. */ - for (j = 0; j < ETHER_ADDR_LEN; j++) { + for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) { if (eth_mask->src.addr_bytes[j]) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); @@ -2431,7 +2671,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, } } rule->mask.mac_addr_byte_mask = 0; - for (j = 0; j < ETHER_ADDR_LEN; j++) { + for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) { /* It's a per byte mask. */ if (eth_mask->dst.addr_bytes[j] == 0xFF) { rule->mask.mac_addr_byte_mask |= 0x1 << j; @@ -2449,10 +2689,10 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, if (item->spec) { rule->b_spec = TRUE; - eth_spec = (const struct rte_flow_item_eth *)item->spec; + eth_spec = item->spec; /* Get the dst MAC. */ - for (j = 0; j < ETHER_ADDR_LEN; j++) { + for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) { rule->ixgbe_fdir.formatted.inner_mac[j] = eth_spec->dst.addr_bytes[j]; } @@ -2488,8 +2728,8 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, return -rte_errno; } - vlan_spec = (const struct rte_flow_item_vlan *)item->spec; - vlan_mask = (const struct rte_flow_item_vlan *)item->mask; + vlan_spec = item->spec; + vlan_mask = item->mask; rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci; @@ -2536,7 +2776,7 @@ ixgbe_parse_fdir_filter(struct rte_eth_dev *dev, hw->mac.type != ixgbe_mac_X550EM_a) return -ENOTSUP; - ret = ixgbe_parse_fdir_filter_normal(attr, pattern, + ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern, actions, rule, error); if (!ret) @@ -2545,13 +2785,164 @@ ixgbe_parse_fdir_filter(struct rte_eth_dev *dev, ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern, actions, rule, error); + if (ret) + return ret; + step_next: + + if (hw->mac.type == ixgbe_mac_82599EB && + rule->fdirflags == IXGBE_FDIRCMD_DROP && + (rule->ixgbe_fdir.formatted.src_port != 0 || + rule->ixgbe_fdir.formatted.dst_port != 0)) + return -ENOTSUP; + if (fdir_mode == RTE_FDIR_MODE_NONE || fdir_mode != rule->mode) return -ENOTSUP; + + if (rule->queue >= dev->data->nb_rx_queues) + return -ENOTSUP; + return ret; } +static int +ixgbe_parse_rss_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_action actions[], + struct ixgbe_rte_flow_rss_conf *rss_conf, + struct rte_flow_error *error) +{ + const struct rte_flow_action *act; + const struct rte_flow_action_rss *rss; + uint16_t n; + + /** + * rss only supports forwarding, + * check if the first not void action is RSS. 
+ */ + act = next_no_void_action(actions, NULL); + if (act->type != RTE_FLOW_ACTION_TYPE_RSS) { + memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + rss = (const struct rte_flow_action_rss *)act->conf; + + if (!rss || !rss->queue_num) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "no valid queues"); + return -rte_errno; + } + + for (n = 0; n < rss->queue_num; n++) { + if (rss->queue[n] >= dev->data->nb_rx_queues) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "queue id > max number of queues"); + return -rte_errno; + } + } + + if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "non-default RSS hash functions are not supported"); + if (rss->level) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "a nonzero RSS encapsulation level is not supported"); + if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key)) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "RSS hash key must be exactly 40 bytes"); + if (rss->queue_num > RTE_DIM(rss_conf->queue)) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "too many queues for RSS context"); + if (ixgbe_rss_conf_init(rss_conf, rss)) + return rte_flow_error_set + (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act, + "RSS context initialization failure"); + + /* check if the next not void item is END */ + act = next_no_void_action(actions, act); + if (act->type != RTE_FLOW_ACTION_TYPE_END) { + memset(rss_conf, 0, sizeof(struct rte_eth_rss_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + /* parse attr */ + /* must be input direction */ + if (!attr->ingress) { + memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + attr, "Only support ingress."); + return -rte_errno; + } + + /* not supported */ + if (attr->egress) { + memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + attr, "Not support egress."); + return -rte_errno; + } + + /* not supported */ + if (attr->transfer) { + memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + + if (attr->priority > 0xFFFF) { + memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "Error priority."); + return -rte_errno; + } + + return 0; +} + +/* remove the rss filter */ +static void +ixgbe_clear_rss_filter(struct rte_eth_dev *dev) +{ + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + + if (filter_info->rss_info.conf.queue_num) + ixgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE); +} + +void +ixgbe_filterlist_init(void) +{ + TAILQ_INIT(&filter_ntuple_list); + TAILQ_INIT(&filter_ethertype_list); + TAILQ_INIT(&filter_syn_list); + TAILQ_INIT(&filter_fdir_list); + TAILQ_INIT(&filter_l2_tunnel_list); + TAILQ_INIT(&filter_rss_list); + TAILQ_INIT(&ixgbe_flow_list); +} + void ixgbe_filterlist_flush(void) { @@ -2561,6 +2952,7 @@ 
ixgbe_filterlist_flush(void) struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr; struct ixgbe_fdir_rule_ele *fdir_rule_ptr; struct ixgbe_flow_mem *ixgbe_flow_mem_ptr; + struct ixgbe_rss_conf_ele *rss_filter_ptr; while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) { TAILQ_REMOVE(&filter_ntuple_list, @@ -2597,6 +2989,13 @@ ixgbe_filterlist_flush(void) rte_free(fdir_rule_ptr); } + while ((rss_filter_ptr = TAILQ_FIRST(&filter_rss_list))) { + TAILQ_REMOVE(&filter_rss_list, + rss_filter_ptr, + entries); + rte_free(rss_filter_ptr); + } + while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) { TAILQ_REMOVE(&ixgbe_flow_list, ixgbe_flow_mem_ptr, @@ -2627,13 +3026,16 @@ ixgbe_flow_create(struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf l2_tn_filter; struct ixgbe_hw_fdir_info *fdir_info = IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private); + struct ixgbe_rte_flow_rss_conf rss_conf; struct rte_flow *flow = NULL; struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr; struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr; struct ixgbe_eth_syn_filter_ele *syn_filter_ptr; struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr; struct ixgbe_fdir_rule_ele *fdir_rule_ptr; + struct ixgbe_rss_conf_ele *rss_filter_ptr; struct ixgbe_flow_mem *ixgbe_flow_mem_ptr; + uint8_t first_mask = FALSE; flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0); if (!flow) { @@ -2654,12 +3056,23 @@ ixgbe_flow_create(struct rte_eth_dev *dev, memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter)); ret = ixgbe_parse_ntuple_filter(dev, attr, pattern, actions, &ntuple_filter, error); + +#ifdef RTE_LIBRTE_SECURITY + /* ESP flow not really a flow*/ + if (ntuple_filter.proto == IPPROTO_ESP) + return flow; +#endif + if (!ret) { ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE); if (!ret) { ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter", sizeof(struct ixgbe_ntuple_filter_ele), 0); - (void)rte_memcpy(&ntuple_filter_ptr->filter_info, + if (!ntuple_filter_ptr) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + goto out; + } + rte_memcpy(&ntuple_filter_ptr->filter_info, &ntuple_filter, sizeof(struct rte_eth_ntuple_filter)); TAILQ_INSERT_TAIL(&filter_ntuple_list, @@ -2681,7 +3094,11 @@ ixgbe_flow_create(struct rte_eth_dev *dev, ethertype_filter_ptr = rte_zmalloc( "ixgbe_ethertype_filter", sizeof(struct ixgbe_ethertype_filter_ele), 0); - (void)rte_memcpy(ðertype_filter_ptr->filter_info, + if (!ethertype_filter_ptr) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + goto out; + } + rte_memcpy(ðertype_filter_ptr->filter_info, ðertype_filter, sizeof(struct rte_eth_ethertype_filter)); TAILQ_INSERT_TAIL(&filter_ethertype_list, @@ -2701,7 +3118,11 @@ ixgbe_flow_create(struct rte_eth_dev *dev, if (!ret) { syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter", sizeof(struct ixgbe_eth_syn_filter_ele), 0); - (void)rte_memcpy(&syn_filter_ptr->filter_info, + if (!syn_filter_ptr) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + goto out; + } + rte_memcpy(&syn_filter_ptr->filter_info, &syn_filter, sizeof(struct rte_eth_syn_filter)); TAILQ_INSERT_TAIL(&filter_syn_list, @@ -2737,6 +3158,7 @@ ixgbe_flow_create(struct rte_eth_dev *dev, goto out; fdir_info->mask_added = TRUE; + first_mask = TRUE; } else { /** * Only support one global mask, @@ -2760,7 +3182,11 @@ ixgbe_flow_create(struct rte_eth_dev *dev, if (!ret) { fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter", sizeof(struct ixgbe_fdir_rule_ele), 0); - (void)rte_memcpy(&fdir_rule_ptr->filter_info, + if (!fdir_rule_ptr) { + PMD_DRV_LOG(ERR, 
"failed to allocate memory"); + goto out; + } + rte_memcpy(&fdir_rule_ptr->filter_info, &fdir_rule, sizeof(struct ixgbe_fdir_rule)); TAILQ_INSERT_TAIL(&filter_fdir_list, @@ -2771,8 +3197,15 @@ ixgbe_flow_create(struct rte_eth_dev *dev, return flow; } - if (ret) + if (ret) { + /** + * clean the mask_added flag if fail to + * program + **/ + if (first_mask) + fdir_info->mask_added = FALSE; goto out; + } } goto out; @@ -2786,7 +3219,11 @@ ixgbe_flow_create(struct rte_eth_dev *dev, if (!ret) { l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter", sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0); - (void)rte_memcpy(&l2_tn_filter_ptr->filter_info, + if (!l2_tn_filter_ptr) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + goto out; + } + rte_memcpy(&l2_tn_filter_ptr->filter_info, &l2_tn_filter, sizeof(struct rte_eth_l2_tunnel_conf)); TAILQ_INSERT_TAIL(&filter_l2_tunnel_list, @@ -2797,6 +3234,28 @@ ixgbe_flow_create(struct rte_eth_dev *dev, } } + memset(&rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf)); + ret = ixgbe_parse_rss_filter(dev, attr, + actions, &rss_conf, error); + if (!ret) { + ret = ixgbe_config_rss_filter(dev, &rss_conf, TRUE); + if (!ret) { + rss_filter_ptr = rte_zmalloc("ixgbe_rss_filter", + sizeof(struct ixgbe_rss_conf_ele), 0); + if (!rss_filter_ptr) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + goto out; + } + ixgbe_rss_conf_init(&rss_filter_ptr->filter_info, + &rss_conf.conf); + TAILQ_INSERT_TAIL(&filter_rss_list, + rss_filter_ptr, entries); + flow->rule = rss_filter_ptr; + flow->filter_type = RTE_ETH_FILTER_HASH; + return flow; + } + } + out: TAILQ_REMOVE(&ixgbe_flow_list, ixgbe_flow_mem_ptr, entries); @@ -2825,6 +3284,7 @@ ixgbe_flow_validate(struct rte_eth_dev *dev, struct rte_eth_syn_filter syn_filter; struct rte_eth_l2_tunnel_conf l2_tn_filter; struct ixgbe_fdir_rule fdir_rule; + struct ixgbe_rte_flow_rss_conf rss_conf; int ret; memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter)); @@ -2854,6 +3314,12 @@ ixgbe_flow_validate(struct rte_eth_dev *dev, memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern, actions, &l2_tn_filter, error); + if (!ret) + return 0; + + memset(&rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf)); + ret = ixgbe_parse_rss_filter(dev, attr, + actions, &rss_conf, error); return ret; } @@ -2880,12 +3346,13 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev, struct ixgbe_flow_mem *ixgbe_flow_mem_ptr; struct ixgbe_hw_fdir_info *fdir_info = IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private); + struct ixgbe_rss_conf_ele *rss_filter_ptr; switch (filter_type) { case RTE_ETH_FILTER_NTUPLE: ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *) pmd_flow->rule; - (void)rte_memcpy(&ntuple_filter, + rte_memcpy(&ntuple_filter, &ntuple_filter_ptr->filter_info, sizeof(struct rte_eth_ntuple_filter)); ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE); @@ -2898,7 +3365,7 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev, case RTE_ETH_FILTER_ETHERTYPE: ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *) pmd_flow->rule; - (void)rte_memcpy(ðertype_filter, + rte_memcpy(ðertype_filter, ðertype_filter_ptr->filter_info, sizeof(struct rte_eth_ethertype_filter)); ret = ixgbe_add_del_ethertype_filter(dev, @@ -2912,7 +3379,7 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev, case RTE_ETH_FILTER_SYN: syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *) pmd_flow->rule; - (void)rte_memcpy(&syn_filter, + rte_memcpy(&syn_filter, &syn_filter_ptr->filter_info, sizeof(struct 
rte_eth_syn_filter)); ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE); @@ -2924,7 +3391,7 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev, break; case RTE_ETH_FILTER_FDIR: fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule; - (void)rte_memcpy(&fdir_rule, + rte_memcpy(&fdir_rule, &fdir_rule_ptr->filter_info, sizeof(struct ixgbe_fdir_rule)); ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE); @@ -2939,7 +3406,7 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev, case RTE_ETH_FILTER_L2_TUNNEL: l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *) pmd_flow->rule; - (void)rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info, + rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info, sizeof(struct rte_eth_l2_tunnel_conf)); ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter); if (!ret) { @@ -2948,6 +3415,17 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev, rte_free(l2_tn_filter_ptr); } break; + case RTE_ETH_FILTER_HASH: + rss_filter_ptr = (struct ixgbe_rss_conf_ele *) + pmd_flow->rule; + ret = ixgbe_config_rss_filter(dev, + &rss_filter_ptr->filter_info, FALSE); + if (!ret) { + TAILQ_REMOVE(&filter_rss_list, + rss_filter_ptr, entries); + rte_free(rss_filter_ptr); + } + break; default: PMD_DRV_LOG(WARNING, "Filter type (%d) not supported", filter_type); @@ -2999,6 +3477,8 @@ ixgbe_flow_flush(struct rte_eth_dev *dev, return ret; } + ixgbe_clear_rss_filter(dev); + ixgbe_filterlist_flush(); return 0;
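
The patch tracks every created rule on per-filter-type TAILQ lists (filter_ntuple_list, filter_rss_list, ixgbe_flow_list, and so on) that ixgbe_filterlist_init() sets up and ixgbe_filterlist_flush() tears down. The following standalone sketch shows that sys/queue.h bookkeeping pattern in isolation; the names demo_filter_ele and demo_list are illustrative only, and plain calloc/free stand in for rte_zmalloc/rte_free.

/*
 * Minimal standalone sketch of the TAILQ bookkeeping pattern the patch
 * adds for ixgbe flow rules. Names are illustrative, not driver code.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct demo_filter_ele {
	TAILQ_ENTRY(demo_filter_ele) entries; /* linkage, as in the *_filter_ele structs */
	int filter_info;                      /* stands in for the filter config struct */
};

TAILQ_HEAD(demo_filter_list, demo_filter_ele);

static struct demo_filter_list demo_list;     /* like filter_ntuple_list */

int main(void)
{
	struct demo_filter_ele *ele;
	int i;

	TAILQ_INIT(&demo_list);                   /* as in ixgbe_filterlist_init() */

	for (i = 0; i < 3; i++) {                 /* "flow create" path: allocate and append */
		ele = calloc(1, sizeof(*ele));
		if (ele == NULL)
			return 1;
		ele->filter_info = i;
		TAILQ_INSERT_TAIL(&demo_list, ele, entries);
	}

	TAILQ_FOREACH(ele, &demo_list, entries)   /* iteration over stored rules */
		printf("filter %d\n", ele->filter_info);

	while ((ele = TAILQ_FIRST(&demo_list))) { /* as in ixgbe_filterlist_flush() */
		TAILQ_REMOVE(&demo_list, ele, entries);
		free(ele);
	}
	return 0;
}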
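
In the tunnel flow-director path, ixgbe_parse_fdir_filter_tunnel() accepts a destination MAC mask only when each byte is 0xFF or 0x00 and folds the accepted bytes into mask.mac_addr_byte_mask with "0x1 << j". A minimal standalone sketch of that per-byte mask construction follows; the function name demo_mac_byte_mask is illustrative and DEMO_ETHER_ADDR_LEN simply mirrors RTE_ETHER_ADDR_LEN (6).

/*
 * Standalone sketch of the per-byte MAC mask handling used in
 * ixgbe_parse_fdir_filter_tunnel(): every mask byte must be 0xFF or 0x00,
 * and accepted bytes are folded into a 6-bit per-byte bitmap.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_ETHER_ADDR_LEN 6

/* Returns the per-byte bitmap, or -1 if a mask byte is neither 0xFF nor 0. */
static int demo_mac_byte_mask(const uint8_t mask[DEMO_ETHER_ADDR_LEN])
{
	int bitmap = 0;
	int j;

	for (j = 0; j < DEMO_ETHER_ADDR_LEN; j++) {
		if (mask[j] == 0xFF)
			bitmap |= 0x1 << j;  /* this byte participates in matching */
		else if (mask[j] != 0)
			return -1;           /* partial byte masks are rejected */
	}
	return bitmap;
}

int main(void)
{
	const uint8_t full[DEMO_ETHER_ADDR_LEN]  = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
	const uint8_t lower[DEMO_ETHER_ADDR_LEN] = { 0, 0, 0, 0xFF, 0xFF, 0xFF };

	printf("full mask  -> 0x%02x\n", (unsigned)demo_mac_byte_mask(full));  /* 0x3f */
	printf("lower half -> 0x%02x\n", (unsigned)demo_mac_byte_mask(lower)); /* 0x38 */
	return 0;
}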
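
The new RSS path (ixgbe_parse_rss_filter() and the RTE_ETH_FILTER_HASH case) only accepts a single RSS action with the default hash function, level 0, a 40-byte key and an in-range queue list. Below is an application-side sketch, not driver code, of a flow rule shaped that way; it assumes a DPDK release with the 18.05 rte_flow_action_rss layout (as this patch uses, before the RTE_ETH_RSS_* rename) and a port that already has four Rx queues configured.

/*
 * Application-side sketch of an RSS flow rule of the shape
 * ixgbe_parse_rss_filter() accepts. Hypothetical helper name; assumes the
 * port is started with at least four Rx queues.
 */
#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

static int demo_create_rss_rule(uint16_t port_id)
{
	static const uint8_t rss_key[40] = {      /* 40 bytes, as the parser requires */
		0x6d, 0x5a, 0x6d, 0x5a,           /* remaining bytes left at 0 for brevity */
	};
	static const uint16_t queues[] = { 0, 1, 2, 3 };
	struct rte_flow_attr attr = { .ingress = 1 };  /* ingress only, no transfer */
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_rss rss = {
		.func = RTE_ETH_HASH_FUNCTION_DEFAULT, /* non-default functions are rejected */
		.level = 0,                            /* nonzero encapsulation level is rejected */
		.types = ETH_RSS_IP | ETH_RSS_TCP,
		.key_len = sizeof(rss_key),
		.queue_num = RTE_DIM(queues),
		.key = rss_key,
		.queue = queues,
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;
	struct rte_flow *flow;

	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) != 0)
		return -1;
	flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
	return flow != NULL ? 0 : -1;
}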