diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index 35db3173ec..c8645f0569 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -56,7 +56,6 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
@@ -78,23 +77,40 @@
 #define IXGBE_MIN_N_TUPLE_PRIO 1
 #define IXGBE_MAX_N_TUPLE_PRIO 7
 
-#define NEXT_ITEM_OF_PATTERN(item, pattern, index)\
-	do {		\
-		item = pattern + index;\
-		while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
-			index++;	\
-			item = pattern + index;	\
-		}	\
-	} while (0)
-
-#define NEXT_ITEM_OF_ACTION(act, actions, index)\
-	do {		\
-		act = actions + index;	\
-		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
-			index++;	\
-			act = actions + index;	\
-		}	\
-	} while (0)
+#define IXGBE_MAX_FLX_SOURCE_OFF 62
+
+/**
+ * Endless loop will never happen with below assumption
+ * 1. there is at least one no-void item(END)
+ * 2. cur is before END.
+ */
+static inline
+const struct rte_flow_item *next_no_void_pattern(
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_item *cur)
+{
+	const struct rte_flow_item *next =
+		cur ? cur + 1 : &pattern[0];
+	while (1) {
+		if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
+			return next;
+		next++;
+	}
+}
+
+static inline
+const struct rte_flow_action *next_no_void_action(
+		const struct rte_flow_action actions[],
+		const struct rte_flow_action *cur)
+{
+	const struct rte_flow_action *next =
+		cur ? cur + 1 : &actions[0];
+	while (1) {
+		if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
+			return next;
+		next++;
+	}
+}
 
 /**
  * Please aware there's an asumption for all the parsers.
@@ -121,8 +137,8 @@
  *		IPV4		src_addr 192.168.1.20	0xFFFFFFFF
  *		dst_addr 192.167.3.50	0xFFFFFFFF
  *		next_proto_id	17	0xFF
- *		UDP/TCP		src_port	80	0xFFFF
- *		dst_port	80	0xFFFF
+ *		UDP/TCP/	src_port	80	0xFFFF
+ *		SCTP		dst_port	80	0xFFFF
  *		END
  * other members in mask and spec should set to 0x00.
  * item->last should be NULL.
@@ -142,7 +158,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, const struct rte_flow_item_tcp *tcp_mask; const struct rte_flow_item_udp *udp_spec; const struct rte_flow_item_udp *udp_mask; - uint32_t index; + const struct rte_flow_item_sctp *sctp_spec; + const struct rte_flow_item_sctp *sctp_mask; if (!pattern) { rte_flow_error_set(error, @@ -164,11 +181,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, return -rte_errno; } - /* parse pattern */ - index = 0; - /* the first not void item can be MAC or IPv4 */ - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, NULL); if (item->type != RTE_FLOW_ITEM_TYPE_ETH && item->type != RTE_FLOW_ITEM_TYPE_IPV4) { @@ -196,8 +210,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, return -rte_errno; } /* check if the next not void item is IPv4 */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -250,10 +263,11 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, filter->proto = ipv4_spec->hdr.next_proto_id; /* check if the next not void item is TCP or UDP */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_TCP && - item->type != RTE_FLOW_ITEM_TYPE_UDP) { + item->type != RTE_FLOW_ITEM_TYPE_UDP && + item->type != RTE_FLOW_ITEM_TYPE_SCTP && + item->type != RTE_FLOW_ITEM_TYPE_END) { memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -262,7 +276,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, } /* get the TCP/UDP info */ - if (!item->spec || !item->mask) { + if ((item->type != RTE_FLOW_ITEM_TYPE_END) && + (!item->spec || !item->mask)) { memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -319,7 +334,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, filter->dst_port = tcp_spec->hdr.dst_port; filter->src_port = tcp_spec->hdr.src_port; filter->tcp_flags = tcp_spec->hdr.tcp_flags; - } else { + } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) { udp_mask = (const struct rte_flow_item_udp *)item->mask; /** @@ -342,11 +357,35 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, udp_spec = (const struct rte_flow_item_udp *)item->spec; filter->dst_port = udp_spec->hdr.dst_port; filter->src_port = udp_spec->hdr.src_port; + } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) { + sctp_mask = (const struct rte_flow_item_sctp *)item->mask; + + /** + * Only support src & dst ports, + * others should be masked. 
+ */ + if (sctp_mask->hdr.tag || + sctp_mask->hdr.cksum) { + memset(filter, 0, + sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + + filter->dst_port_mask = sctp_mask->hdr.dst_port; + filter->src_port_mask = sctp_mask->hdr.src_port; + + sctp_spec = (const struct rte_flow_item_sctp *)item->spec; + filter->dst_port = sctp_spec->hdr.dst_port; + filter->src_port = sctp_spec->hdr.src_port; + } else { + goto action; } /* check if the next not void item is END */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_END) { memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); rte_flow_error_set(error, EINVAL, @@ -355,14 +394,13 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, return -rte_errno; } - /* parse action */ - index = 0; +action: /** * n-tuple only supports forwarding, * check if the first not void action is QUEUE. */ - NEXT_ITEM_OF_ACTION(act, actions, index); + act = next_no_void_action(actions, NULL); if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) { memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); rte_flow_error_set(error, EINVAL, @@ -374,8 +412,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, ((const struct rte_flow_action_queue *)act->conf)->index; /* check if the next not void item is END */ - index++; - NEXT_ITEM_OF_ACTION(act, actions, index); + act = next_no_void_action(actions, act); if (act->type != RTE_FLOW_ACTION_TYPE_END) { memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); rte_flow_error_set(error, EINVAL, @@ -456,9 +493,7 @@ ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev, return -rte_errno; } - if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM || - filter->priority > IXGBE_5TUPLE_MAX_PRI || - filter->priority < IXGBE_5TUPLE_MIN_PRI) + if (filter->queue >= dev->data->nb_rx_queues) return -rte_errno; /* fixed value for ixgbe */ @@ -494,7 +529,6 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr, const struct rte_flow_item_eth *eth_spec; const struct rte_flow_item_eth *eth_mask; const struct rte_flow_action_queue *act_q; - uint32_t index; if (!pattern) { rte_flow_error_set(error, EINVAL, @@ -517,15 +551,8 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr, return -rte_errno; } - /* Parse pattern */ - index = 0; - + item = next_no_void_pattern(pattern, NULL); /* The first non-void item should be MAC. */ - item = pattern + index; - while (item->type == RTE_FLOW_ITEM_TYPE_VOID) { - index++; - item = pattern + index; - } if (item->type != RTE_FLOW_ITEM_TYPE_ETH) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -584,12 +611,7 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr, filter->ether_type = rte_be_to_cpu_16(eth_spec->type); /* Check if the next non-void item is END. */ - index++; - item = pattern + index; - while (item->type == RTE_FLOW_ITEM_TYPE_VOID) { - index++; - item = pattern + index; - } + item = next_no_void_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_END) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -599,13 +621,7 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr, /* Parse action */ - index = 0; - /* Check if the first non-void action is QUEUE or DROP. 
*/ - act = actions + index; - while (act->type == RTE_FLOW_ACTION_TYPE_VOID) { - index++; - act = actions + index; - } + act = next_no_void_action(actions, NULL); if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE && act->type != RTE_FLOW_ACTION_TYPE_DROP) { rte_flow_error_set(error, EINVAL, @@ -622,12 +638,7 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr, } /* Check if the next non-void item is END */ - index++; - act = actions + index; - while (act->type == RTE_FLOW_ACTION_TYPE_VOID) { - index++; - act = actions + index; - } + act = next_no_void_action(actions, act); if (act->type != RTE_FLOW_ACTION_TYPE_END) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, @@ -699,7 +710,7 @@ ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev, return -rte_errno; } - if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) { + if (filter->queue >= dev->data->nb_rx_queues) { memset(filter, 0, sizeof(struct rte_eth_ethertype_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -767,7 +778,6 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, const struct rte_flow_item_tcp *tcp_spec; const struct rte_flow_item_tcp *tcp_mask; const struct rte_flow_action_queue *act_q; - uint32_t index; if (!pattern) { rte_flow_error_set(error, EINVAL, @@ -790,11 +800,9 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, return -rte_errno; } - /* parse pattern */ - index = 0; /* the first not void item should be MAC or IPv4 or IPv6 or TCP */ - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, NULL); if (item->type != RTE_FLOW_ITEM_TYPE_ETH && item->type != RTE_FLOW_ITEM_TYPE_IPV4 && item->type != RTE_FLOW_ITEM_TYPE_IPV6 && @@ -823,8 +831,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, } /* check if the next not void item is IPv4 or IPv6 */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 && item->type != RTE_FLOW_ITEM_TYPE_IPV6) { rte_flow_error_set(error, EINVAL, @@ -846,8 +853,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, } /* check if the next not void item is TCP */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_TCP) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -891,8 +897,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, } /* check if the next not void item is END */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_END) { memset(filter, 0, sizeof(struct rte_eth_syn_filter)); rte_flow_error_set(error, EINVAL, @@ -901,11 +906,8 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, return -rte_errno; } - /* parse action */ - index = 0; - /* check if the first not void action is QUEUE. 
*/ - NEXT_ITEM_OF_ACTION(act, actions, index); + act = next_no_void_action(actions, NULL); if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) { memset(filter, 0, sizeof(struct rte_eth_syn_filter)); rte_flow_error_set(error, EINVAL, @@ -925,8 +927,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, } /* check if the next not void item is END */ - index++; - NEXT_ITEM_OF_ACTION(act, actions, index); + act = next_no_void_action(actions, act); if (act->type != RTE_FLOW_ACTION_TYPE_END) { memset(filter, 0, sizeof(struct rte_eth_syn_filter)); rte_flow_error_set(error, EINVAL, @@ -986,6 +987,9 @@ ixgbe_parse_syn_filter(struct rte_eth_dev *dev, ret = cons_parse_syn_filter(attr, pattern, actions, filter, error); + if (filter->queue >= dev->data->nb_rx_queues) + return -rte_errno; + if (ret) return ret; @@ -1022,7 +1026,6 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr, const struct rte_flow_item_e_tag *e_tag_mask; const struct rte_flow_action *act; const struct rte_flow_action_queue *act_q; - uint32_t index; if (!pattern) { rte_flow_error_set(error, EINVAL, @@ -1044,11 +1047,9 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr, NULL, "NULL attribute."); return -rte_errno; } - /* parse pattern */ - index = 0; /* The first not void item should be e-tag. */ - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, NULL); if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) { memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); rte_flow_error_set(error, EINVAL, @@ -1095,8 +1096,7 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr, filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b); /* check if the next not void item is END */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_END) { memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); rte_flow_error_set(error, EINVAL, @@ -1133,11 +1133,8 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr, return -rte_errno; } - /* parse action */ - index = 0; - /* check if the first not void action is QUEUE. */ - NEXT_ITEM_OF_ACTION(act, actions, index); + act = next_no_void_action(actions, NULL); if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) { memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); rte_flow_error_set(error, EINVAL, @@ -1150,8 +1147,7 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr, filter->pool = act_q->index; /* check if the next not void item is END */ - index++; - NEXT_ITEM_OF_ACTION(act, actions, index); + act = next_no_void_action(actions, act); if (act->type != RTE_FLOW_ACTION_TYPE_END) { memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); rte_flow_error_set(error, EINVAL, @@ -1187,6 +1183,9 @@ ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev, return -rte_errno; } + if (l2_tn_filter->pool >= dev->data->nb_rx_queues) + return -rte_errno; + return ret; } @@ -1200,7 +1199,6 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr, const struct rte_flow_action *act; const struct rte_flow_action_queue *act_q; const struct rte_flow_action_mark *mark; - uint32_t index; /* parse attr */ /* must be input direction */ @@ -1230,11 +1228,8 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr, return -rte_errno; } - /* parse action */ - index = 0; - /* check if the first not void action is QUEUE or DROP. 
*/ - NEXT_ITEM_OF_ACTION(act, actions, index); + act = next_no_void_action(actions, NULL); if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE && act->type != RTE_FLOW_ACTION_TYPE_DROP) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); @@ -1248,12 +1243,19 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr, act_q = (const struct rte_flow_action_queue *)act->conf; rule->queue = act_q->index; } else { /* drop */ + /* signature mode does not support drop action. */ + if (rule->mode == RTE_FDIR_MODE_SIGNATURE) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } rule->fdirflags = IXGBE_FDIRCMD_DROP; } /* check if the next not void item is MARK */ - index++; - NEXT_ITEM_OF_ACTION(act, actions, index); + act = next_no_void_action(actions, act); if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) && (act->type != RTE_FLOW_ACTION_TYPE_END)) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); @@ -1268,8 +1270,7 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr, if (act->type == RTE_FLOW_ACTION_TYPE_MARK) { mark = (const struct rte_flow_action_mark *)act->conf; rule->soft_id = mark->id; - index++; - NEXT_ITEM_OF_ACTION(act, actions, index); + act = next_no_void_action(actions, act); } /* check if the next not void item is END */ @@ -1284,14 +1285,78 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr, return 0; } +/* search next no void pattern and skip fuzzy */ +static inline +const struct rte_flow_item *next_no_fuzzy_pattern( + const struct rte_flow_item pattern[], + const struct rte_flow_item *cur) +{ + const struct rte_flow_item *next = + next_no_void_pattern(pattern, cur); + while (1) { + if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY) + return next; + next = next_no_void_pattern(pattern, next); + } +} + +static inline uint8_t signature_match(const struct rte_flow_item pattern[]) +{ + const struct rte_flow_item_fuzzy *spec, *last, *mask; + const struct rte_flow_item *item; + uint32_t sh, lh, mh; + int i = 0; + + while (1) { + item = pattern + i; + if (item->type == RTE_FLOW_ITEM_TYPE_END) + break; + + if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) { + spec = + (const struct rte_flow_item_fuzzy *)item->spec; + last = + (const struct rte_flow_item_fuzzy *)item->last; + mask = + (const struct rte_flow_item_fuzzy *)item->mask; + + if (!spec || !mask) + return 0; + + sh = spec->thresh; + + if (!last) + lh = sh; + else + lh = last->thresh; + + mh = mask->thresh; + sh = sh & mh; + lh = lh & mh; + + if (!sh || sh > lh) + return 0; + + return 1; + } + + i++; + } + + return 0; +} + /** * Parse the rule to see if it is a IP or MAC VLAN flow director rule. * And get the flow director filter info BTW. * UDP/TCP/SCTP PATTERN: - * The first not void item can be ETH or IPV4. - * The second not void item must be IPV4 if the first one is ETH. - * The third not void item must be UDP or TCP or SCTP. + * The first not void item can be ETH or IPV4 or IPV6 + * The second not void item must be IPV4 or IPV6 if the first one is ETH. + * The next not void item could be UDP or TCP or SCTP (optional) + * The next not void item could be RAW (for flexbyte, optional) * The next not void item must be END. + * A Fuzzy Match pattern can appear at any place before END. + * Fuzzy Match is optional for IPV4 but is required for IPV6 * MAC VLAN PATTERN: * The first not void item must be ETH. * The second not void item must be MAC VLAN. 
@@ -1308,6 +1373,14 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr, * dst_addr 192.167.3.50 0xFFFFFFFF * UDP/TCP/SCTP src_port 80 0xFFFF * dst_port 80 0xFFFF + * FLEX relative 0 0x1 + * search 0 0x1 + * reserved 0 0 + * offset 12 0xFFFFFFFF + * limit 0 0xFFFF + * length 2 0xFFFF + * pattern[0] 0x86 0xFF + * pattern[1] 0xDD 0xFF * END * MAC VLAN pattern example: * ITEM Spec Mask @@ -1315,13 +1388,13 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr, {0xAC, 0x7B, 0xA1, {0xFF, 0xFF, 0xFF, 0x2C, 0x6D, 0x36} 0xFF, 0xFF, 0xFF} * MAC VLAN tci 0x2016 0xEFFF - * tpid 0x8100 0xFFFF * END * Other members in mask and spec should set to 0x00. * Item->last should be NULL. */ static int -ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, +ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, const struct rte_flow_item pattern[], const struct rte_flow_action actions[], struct ixgbe_fdir_rule *rule, @@ -1332,6 +1405,8 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, const struct rte_flow_item_eth *eth_mask; const struct rte_flow_item_ipv4 *ipv4_spec; const struct rte_flow_item_ipv4 *ipv4_mask; + const struct rte_flow_item_ipv6 *ipv6_spec; + const struct rte_flow_item_ipv6 *ipv6_mask; const struct rte_flow_item_tcp *tcp_spec; const struct rte_flow_item_tcp *tcp_mask; const struct rte_flow_item_udp *udp_spec; @@ -1340,8 +1415,11 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, const struct rte_flow_item_sctp *sctp_mask; const struct rte_flow_item_vlan *vlan_spec; const struct rte_flow_item_vlan *vlan_mask; + const struct rte_flow_item_raw *raw_mask; + const struct rte_flow_item_raw *raw_spec; + uint8_t j; - uint32_t index, j; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); if (!pattern) { rte_flow_error_set(error, EINVAL, @@ -1371,17 +1449,16 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask)); rule->mask.vlan_tci_mask = 0; - - /* parse pattern */ - index = 0; + rule->mask.flex_bytes_mask = 0; /** * The first not void item should be * MAC or IPv4 or TCP or UDP or SCTP. */ - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_fuzzy_pattern(pattern, NULL); if (item->type != RTE_FLOW_ITEM_TYPE_ETH && item->type != RTE_FLOW_ITEM_TYPE_IPV4 && + item->type != RTE_FLOW_ITEM_TYPE_IPV6 && item->type != RTE_FLOW_ITEM_TYPE_TCP && item->type != RTE_FLOW_ITEM_TYPE_UDP && item->type != RTE_FLOW_ITEM_TYPE_SCTP) { @@ -1392,7 +1469,10 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, return -rte_errno; } - rule->mode = RTE_FDIR_MODE_PERFECT; + if (signature_match(pattern)) + rule->mode = RTE_FDIR_MODE_SIGNATURE; + else + rule->mode = RTE_FDIR_MODE_PERFECT; /*Not supported last point for range*/ if (item->last) { @@ -1429,14 +1509,13 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, if (item->mask) { - /* If ethernet has meaning, it means MAC VLAN mode. */ - rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN; rule->b_mask = TRUE; eth_mask = (const struct rte_flow_item_eth *)item->mask; /* Ether type should be masked. 
*/ - if (eth_mask->type) { + if (eth_mask->type || + rule->mode == RTE_FDIR_MODE_SIGNATURE) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -1444,6 +1523,9 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, return -rte_errno; } + /* If ethernet has meaning, it means MAC VLAN mode. */ + rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN; + /** * src MAC address must be masked, * and don't support dst MAC address mask. @@ -1472,8 +1554,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, * Check if the next not void item is vlan or ipv4. * IPv6 is not supported. */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_fuzzy_pattern(pattern, item); if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) { if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); @@ -1513,39 +1594,15 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, vlan_spec = (const struct rte_flow_item_vlan *)item->spec; vlan_mask = (const struct rte_flow_item_vlan *)item->mask; - if (vlan_spec->tpid != rte_cpu_to_be_16(ETHER_TYPE_VLAN)) { - memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, "Not supported by fdir filter"); - return -rte_errno; - } - rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci; - if (vlan_mask->tpid != (uint16_t)~0U) { - memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, "Not supported by fdir filter"); - return -rte_errno; - } rule->mask.vlan_tci_mask = vlan_mask->tci; rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF); /* More than one tags are not supported. */ - /** - * Check if the next not void item is not vlan. - */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); - if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) { - memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, "Not supported by fdir filter"); - return -rte_errno; - } else if (item->type != RTE_FLOW_ITEM_TYPE_END) { + /* Next not void item must be END */ + item = next_no_fuzzy_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_END) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -1554,7 +1611,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, } } - /* Get the IP info. */ + /* Get the IPV4 info. */ if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) { /** * Set the flow type even if there's no content @@ -1614,12 +1671,104 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, * Check if the next not void item is * TCP or UDP or SCTP or END. */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_fuzzy_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_TCP && item->type != RTE_FLOW_ITEM_TYPE_UDP && item->type != RTE_FLOW_ITEM_TYPE_SCTP && - item->type != RTE_FLOW_ITEM_TYPE_END) { + item->type != RTE_FLOW_ITEM_TYPE_END && + item->type != RTE_FLOW_ITEM_TYPE_RAW) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } + + /* Get the IPV6 info. */ + if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) { + /** + * Set the flow type even if there's no content + * as we must have a flow type. 
+ */ + rule->ixgbe_fdir.formatted.flow_type = + IXGBE_ATR_FLOW_TYPE_IPV6; + + /** + * 1. must signature match + * 2. not support last + * 3. mask must not null + */ + if (rule->mode != RTE_FDIR_MODE_SIGNATURE || + item->last || + !item->mask) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + rule->b_mask = TRUE; + ipv6_mask = + (const struct rte_flow_item_ipv6 *)item->mask; + if (ipv6_mask->hdr.vtc_flow || + ipv6_mask->hdr.payload_len || + ipv6_mask->hdr.proto || + ipv6_mask->hdr.hop_limits) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + + /* check src addr mask */ + for (j = 0; j < 16; j++) { + if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) { + rule->mask.src_ipv6_mask |= 1 << j; + } else if (ipv6_mask->hdr.src_addr[j] != 0) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } + + /* check dst addr mask */ + for (j = 0; j < 16; j++) { + if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) { + rule->mask.dst_ipv6_mask |= 1 << j; + } else if (ipv6_mask->hdr.dst_addr[j] != 0) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } + + if (item->spec) { + rule->b_spec = TRUE; + ipv6_spec = + (const struct rte_flow_item_ipv6 *)item->spec; + rte_memcpy(rule->ixgbe_fdir.formatted.src_ip, + ipv6_spec->hdr.src_addr, 16); + rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip, + ipv6_spec->hdr.dst_addr, 16); + } + + /** + * Check if the next not void item is + * TCP or UDP or SCTP or END. + */ + item = next_no_fuzzy_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_TCP && + item->type != RTE_FLOW_ITEM_TYPE_UDP && + item->type != RTE_FLOW_ITEM_TYPE_SCTP && + item->type != RTE_FLOW_ITEM_TYPE_END && + item->type != RTE_FLOW_ITEM_TYPE_RAW) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -1634,8 +1783,8 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, * Set the flow type even if there's no content * as we must have a flow type. */ - rule->ixgbe_fdir.formatted.flow_type = - IXGBE_ATR_FLOW_TYPE_TCPV4; + rule->ixgbe_fdir.formatted.flow_type |= + IXGBE_ATR_L4TYPE_TCP; /*Not supported last point for range*/ if (item->last) { rte_flow_error_set(error, EINVAL, @@ -1680,6 +1829,17 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, rule->ixgbe_fdir.formatted.dst_port = tcp_spec->hdr.dst_port; } + + item = next_no_fuzzy_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_RAW && + item->type != RTE_FLOW_ITEM_TYPE_END) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } /* Get the UDP info */ @@ -1688,8 +1848,8 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, * Set the flow type even if there's no content * as we must have a flow type. 
*/ - rule->ixgbe_fdir.formatted.flow_type = - IXGBE_ATR_FLOW_TYPE_UDPV4; + rule->ixgbe_fdir.formatted.flow_type |= + IXGBE_ATR_L4TYPE_UDP; /*Not supported last point for range*/ if (item->last) { rte_flow_error_set(error, EINVAL, @@ -1729,6 +1889,17 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, rule->ixgbe_fdir.formatted.dst_port = udp_spec->hdr.dst_port; } + + item = next_no_fuzzy_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_RAW && + item->type != RTE_FLOW_ITEM_TYPE_END) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } /* Get the SCTP info */ @@ -1737,8 +1908,8 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, * Set the flow type even if there's no content * as we must have a flow type. */ - rule->ixgbe_fdir.formatted.flow_type = - IXGBE_ATR_FLOW_TYPE_SCTPV4; + rule->ixgbe_fdir.formatted.flow_type |= + IXGBE_ATR_L4TYPE_SCTP; /*Not supported last point for range*/ if (item->last) { rte_flow_error_set(error, EINVAL, @@ -1746,46 +1917,147 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, item, "Not supported last point for range"); return -rte_errno; } - /** - * Only care about src & dst ports, - * others should be masked. - */ - if (!item->mask) { + + /* only x550 family only support sctp port */ + if (hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550EM_x || + hw->mac.type == ixgbe_mac_X550EM_a) { + /** + * Only care about src & dst ports, + * others should be masked. + */ + if (!item->mask) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + rule->b_mask = TRUE; + sctp_mask = + (const struct rte_flow_item_sctp *)item->mask; + if (sctp_mask->hdr.tag || + sctp_mask->hdr.cksum) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + rule->mask.src_port_mask = sctp_mask->hdr.src_port; + rule->mask.dst_port_mask = sctp_mask->hdr.dst_port; + + if (item->spec) { + rule->b_spec = TRUE; + sctp_spec = + (const struct rte_flow_item_sctp *)item->spec; + rule->ixgbe_fdir.formatted.src_port = + sctp_spec->hdr.src_port; + rule->ixgbe_fdir.formatted.dst_port = + sctp_spec->hdr.dst_port; + } + /* others even sctp port is not supported */ + } else { + sctp_mask = + (const struct rte_flow_item_sctp *)item->mask; + if (sctp_mask && + (sctp_mask->hdr.src_port || + sctp_mask->hdr.dst_port || + sctp_mask->hdr.tag || + sctp_mask->hdr.cksum)) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } + + item = next_no_fuzzy_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_RAW && + item->type != RTE_FLOW_ITEM_TYPE_END) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "Not supported by fdir filter"); return -rte_errno; } - rule->b_mask = TRUE; - sctp_mask = - (const struct rte_flow_item_sctp *)item->mask; - if (sctp_mask->hdr.tag || - sctp_mask->hdr.cksum) { + } + + /* Get the flex byte info */ + if (item->type == RTE_FLOW_ITEM_TYPE_RAW) { + /* Not supported last point for range*/ + if (item->last) { + 
rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + /* mask should not be null */ + if (!item->mask || !item->spec) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "Not supported by fdir filter"); return -rte_errno; } - rule->mask.src_port_mask = sctp_mask->hdr.src_port; - rule->mask.dst_port_mask = sctp_mask->hdr.dst_port; - if (item->spec) { - rule->b_spec = TRUE; - sctp_spec = - (const struct rte_flow_item_sctp *)item->spec; - rule->ixgbe_fdir.formatted.src_port = - sctp_spec->hdr.src_port; - rule->ixgbe_fdir.formatted.dst_port = - sctp_spec->hdr.dst_port; + raw_mask = (const struct rte_flow_item_raw *)item->mask; + + /* check mask */ + if (raw_mask->relative != 0x1 || + raw_mask->search != 0x1 || + raw_mask->reserved != 0x0 || + (uint32_t)raw_mask->offset != 0xffffffff || + raw_mask->limit != 0xffff || + raw_mask->length != 0xffff) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + + raw_spec = (const struct rte_flow_item_raw *)item->spec; + + /* check spec */ + if (raw_spec->relative != 0 || + raw_spec->search != 0 || + raw_spec->reserved != 0 || + raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF || + raw_spec->offset % 2 || + raw_spec->limit != 0 || + raw_spec->length != 2 || + /* pattern can't be 0xffff */ + (raw_spec->pattern[0] == 0xff && + raw_spec->pattern[1] == 0xff)) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + + /* check pattern mask */ + if (raw_mask->pattern[0] != 0xff || + raw_mask->pattern[1] != 0xff) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; } + + rule->mask.flex_bytes_mask = 0xffff; + rule->ixgbe_fdir.formatted.flex_bytes = + (((uint16_t)raw_spec->pattern[1]) << 8) | + raw_spec->pattern[0]; + rule->flex_bytes_offset = raw_spec->offset; } if (item->type != RTE_FLOW_ITEM_TYPE_END) { /* check if the next not void item is END */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_fuzzy_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_END) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, @@ -1824,6 +2096,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, * IPV4/IPV6 NULL NULL * UDP NULL NULL * VxLAN vni{0x00, 0x32, 0x54} {0xFF, 0xFF, 0xFF} + * MAC VLAN tci 0x2016 0xEFFF * END * NEGRV pattern example: * ITEM Spec Mask @@ -1831,6 +2104,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, * IPV4/IPV6 NULL NULL * NVGRE protocol 0x6558 0xFFFF * tni{0x00, 0x32, 0x54} {0xFF, 0xFF, 0xFF} + * MAC VLAN tci 0x2016 0xEFFF * END * other members in mask and spec should set to 0x00. * item->last should be NULL. 
@@ -1851,7 +2125,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, const struct rte_flow_item_eth *eth_mask; const struct rte_flow_item_vlan *vlan_spec; const struct rte_flow_item_vlan *vlan_mask; - uint32_t index, j; + uint32_t j; if (!pattern) { rte_flow_error_set(error, EINVAL, @@ -1882,14 +2156,11 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask)); rule->mask.vlan_tci_mask = 0; - /* parse pattern */ - index = 0; - /** * The first not void item should be * MAC or IPv4 or IPv6 or UDP or VxLAN. */ - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, NULL); if (item->type != RTE_FLOW_ITEM_TYPE_ETH && item->type != RTE_FLOW_ITEM_TYPE_IPV4 && item->type != RTE_FLOW_ITEM_TYPE_IPV6 && @@ -1915,7 +2186,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, item, "Not supported by fdir filter"); return -rte_errno; } - /*Not supported last point for range*/ + /* Not supported last point for range*/ if (item->last) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, @@ -1924,8 +2195,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, } /* Check if the next not void item is IPv4 or IPv6. */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 && item->type != RTE_FLOW_ITEM_TYPE_IPV6) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); @@ -1956,8 +2226,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, } /* Check if the next not void item is UDP or NVGRE. */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_UDP && item->type != RTE_FLOW_ITEM_TYPE_NVGRE) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); @@ -1987,8 +2256,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, } /* Check if the next not void item is VxLAN. */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, @@ -2144,8 +2412,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, } /* check if the next not void item is MAC */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_ETH) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, @@ -2228,10 +2495,9 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, * Check if the next not void item is vlan or ipv4. * IPv6 is not supported. 
*/ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, item); if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) && - (item->type != RTE_FLOW_ITEM_TYPE_VLAN)) { + (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -2258,48 +2524,15 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, vlan_spec = (const struct rte_flow_item_vlan *)item->spec; vlan_mask = (const struct rte_flow_item_vlan *)item->mask; - if (vlan_spec->tpid != rte_cpu_to_be_16(ETHER_TYPE_VLAN)) { - memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, "Not supported by fdir filter"); - return -rte_errno; - } - rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci; - if (vlan_mask->tpid != (uint16_t)~0U) { - memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, "Not supported by fdir filter"); - return -rte_errno; - } rule->mask.vlan_tci_mask = vlan_mask->tci; rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF); /* More than one tags are not supported. */ - /** - * Check if the next not void item is not vlan. - */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); - if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) { - memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, "Not supported by fdir filter"); - return -rte_errno; - } else if (item->type != RTE_FLOW_ITEM_TYPE_END) { - memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, "Not supported by fdir filter"); - return -rte_errno; - } /* check if the next not void item is END */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_END) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, @@ -2336,7 +2569,7 @@ ixgbe_parse_fdir_filter(struct rte_eth_dev *dev, hw->mac.type != ixgbe_mac_X550EM_a) return -ENOTSUP; - ret = ixgbe_parse_fdir_filter_normal(attr, pattern, + ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern, actions, rule, error); if (!ret) @@ -2345,10 +2578,24 @@ ixgbe_parse_fdir_filter(struct rte_eth_dev *dev, ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern, actions, rule, error); + if (ret) + return ret; + step_next: + + if (hw->mac.type == ixgbe_mac_82599EB && + rule->fdirflags == IXGBE_FDIRCMD_DROP && + (rule->ixgbe_fdir.formatted.src_port != 0 || + rule->ixgbe_fdir.formatted.dst_port != 0)) + return -ENOTSUP; + if (fdir_mode == RTE_FDIR_MODE_NONE || fdir_mode != rule->mode) return -ENOTSUP; + + if (rule->queue >= dev->data->nb_rx_queues) + return -ENOTSUP; + return ret; } @@ -2434,6 +2681,7 @@ ixgbe_flow_create(struct rte_eth_dev *dev, struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr; struct ixgbe_fdir_rule_ele *fdir_rule_ptr; struct ixgbe_flow_mem *ixgbe_flow_mem_ptr; + uint8_t first_mask = FALSE; flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0); if (!flow) { @@ -2459,6 +2707,10 @@ ixgbe_flow_create(struct rte_eth_dev *dev, if (!ret) { ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter", sizeof(struct ixgbe_ntuple_filter_ele), 0); + if (!ntuple_filter_ptr) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + goto out; + } (void)rte_memcpy(&ntuple_filter_ptr->filter_info, &ntuple_filter, 
sizeof(struct rte_eth_ntuple_filter)); @@ -2481,6 +2733,10 @@ ixgbe_flow_create(struct rte_eth_dev *dev, ethertype_filter_ptr = rte_zmalloc( "ixgbe_ethertype_filter", sizeof(struct ixgbe_ethertype_filter_ele), 0); + if (!ethertype_filter_ptr) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + goto out; + } (void)rte_memcpy(ðertype_filter_ptr->filter_info, ðertype_filter, sizeof(struct rte_eth_ethertype_filter)); @@ -2501,6 +2757,10 @@ ixgbe_flow_create(struct rte_eth_dev *dev, if (!ret) { syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter", sizeof(struct ixgbe_eth_syn_filter_ele), 0); + if (!syn_filter_ptr) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + goto out; + } (void)rte_memcpy(&syn_filter_ptr->filter_info, &syn_filter, sizeof(struct rte_eth_syn_filter)); @@ -2525,11 +2785,19 @@ ixgbe_flow_create(struct rte_eth_dev *dev, rte_memcpy(&fdir_info->mask, &fdir_rule.mask, sizeof(struct ixgbe_hw_fdir_mask)); + fdir_info->flex_bytes_offset = + fdir_rule.flex_bytes_offset; + + if (fdir_rule.mask.flex_bytes_mask) + ixgbe_fdir_set_flexbytes_offset(dev, + fdir_rule.flex_bytes_offset); + ret = ixgbe_fdir_set_input_mask(dev); if (ret) goto out; fdir_info->mask_added = TRUE; + first_mask = TRUE; } else { /** * Only support one global mask, @@ -2540,6 +2808,10 @@ ixgbe_flow_create(struct rte_eth_dev *dev, sizeof(struct ixgbe_hw_fdir_mask)); if (ret) goto out; + + if (fdir_info->flex_bytes_offset != + fdir_rule.flex_bytes_offset) + goto out; } } @@ -2549,6 +2821,10 @@ ixgbe_flow_create(struct rte_eth_dev *dev, if (!ret) { fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter", sizeof(struct ixgbe_fdir_rule_ele), 0); + if (!fdir_rule_ptr) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + goto out; + } (void)rte_memcpy(&fdir_rule_ptr->filter_info, &fdir_rule, sizeof(struct ixgbe_fdir_rule)); @@ -2560,8 +2836,15 @@ ixgbe_flow_create(struct rte_eth_dev *dev, return flow; } - if (ret) + if (ret) { + /** + * clean the mask_added flag if fail to + * program + **/ + if (first_mask) + fdir_info->mask_added = FALSE; goto out; + } } goto out; @@ -2575,6 +2858,10 @@ ixgbe_flow_create(struct rte_eth_dev *dev, if (!ret) { l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter", sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0); + if (!l2_tn_filter_ptr) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + goto out; + } (void)rte_memcpy(&l2_tn_filter_ptr->filter_info, &l2_tn_filter, sizeof(struct rte_eth_l2_tunnel_conf)); @@ -2603,7 +2890,7 @@ out: * the HW. Because there can be no enough room for the rule. 
*/ static int -ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev, +ixgbe_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item pattern[], const struct rte_flow_action actions[], @@ -2667,6 +2954,8 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev, struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr; struct ixgbe_fdir_rule_ele *fdir_rule_ptr; struct ixgbe_flow_mem *ixgbe_flow_mem_ptr; + struct ixgbe_hw_fdir_info *fdir_info = + IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private); switch (filter_type) { case RTE_ETH_FILTER_NTUPLE: @@ -2719,6 +3008,8 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev, TAILQ_REMOVE(&filter_fdir_list, fdir_rule_ptr, entries); rte_free(fdir_rule_ptr); + if (TAILQ_EMPTY(&filter_fdir_list)) + fdir_info->mask_added = false; } break; case RTE_ETH_FILTER_L2_TUNNEL: @@ -2790,9 +3081,8 @@ ixgbe_flow_flush(struct rte_eth_dev *dev, } const struct rte_flow_ops ixgbe_flow_ops = { - ixgbe_flow_validate, - ixgbe_flow_create, - ixgbe_flow_destroy, - ixgbe_flow_flush, - NULL, + .validate = ixgbe_flow_validate, + .create = ixgbe_flow_create, + .destroy = ixgbe_flow_destroy, + .flush = ixgbe_flow_flush, };
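Usage sketch (editor's addition, not part of the patch): with SCTP now accepted by cons_parse_ntuple_filter(), an application could request a 5-tuple SCTP rule roughly as below. The helper name, port id, queue index, addresses and SCTP port are invented example values, and the fragment assumes the rte_flow API of the DPDK release this change targets; only the fields the n-tuple parser allows (IPv4 addresses, next_proto_id, SCTP ports) are masked.

#include <netinet/in.h>
#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_flow.h>

/* Hypothetical helper: steer SCTP 192.168.1.20:2905 -> 192.167.3.50:2905
 * to RX queue 1.  Unset mask fields are left zero so the ixgbe n-tuple
 * parser does not reject the rule.
 */
static struct rte_flow *
sctp_ntuple_rule_example(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr = {
			.src_addr = rte_cpu_to_be_32(0xC0A80114), /* 192.168.1.20 */
			.dst_addr = rte_cpu_to_be_32(0xC0A70332), /* 192.167.3.50 */
			.next_proto_id = IPPROTO_SCTP,
		},
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr = {
			.src_addr = UINT32_MAX,
			.dst_addr = UINT32_MAX,
			.next_proto_id = UINT8_MAX,
		},
	};
	struct rte_flow_item_sctp sctp_spec = {
		.hdr = {
			.src_port = rte_cpu_to_be_16(2905),
			.dst_port = rte_cpu_to_be_16(2905),
		},
	};
	struct rte_flow_item_sctp sctp_mask = {
		.hdr = { .src_port = UINT16_MAX, .dst_port = UINT16_MAX },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_SCTP,
		  .spec = &sctp_spec, .mask = &sctp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	if (rte_flow_validate(port_id, &attr, pattern, actions, err))
		return NULL;

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}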
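A second editor's sketch, with the same caveats, for the new IPv6 signature-mode path: ixgbe only handles IPv6 flow director rules in signature mode, which ixgbe_parse_fdir_filter_normal() detects through a FUZZY item, so the pattern carries one alongside the IPv6 addresses. The port is assumed to have been configured with fdir_conf.mode = RTE_FDIR_MODE_SIGNATURE, otherwise ixgbe_parse_fdir_filter() returns -ENOTSUP; the addresses, threshold and queue index are arbitrary.

#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>
#include <rte_flow.h>

/* Hypothetical helper: signature-mode FDIR rule matching IPv6
 * 2001:db8::1 -> 2001:db8::2 and steering it to RX queue 1.
 */
static struct rte_flow *
ipv6_signature_rule_example(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_fuzzy fuzzy_spec = { .thresh = 1 };
	struct rte_flow_item_fuzzy fuzzy_mask = { .thresh = UINT32_MAX };
	struct rte_flow_item_ipv6 ip_spec;
	struct rte_flow_item_ipv6 ip_mask;
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_FUZZY,
		  .spec = &fuzzy_spec, .mask = &fuzzy_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV6,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	memset(&ip_spec, 0, sizeof(ip_spec));
	memset(&ip_mask, 0, sizeof(ip_mask));
	inet_pton(AF_INET6, "2001:db8::1", ip_spec.hdr.src_addr);
	inet_pton(AF_INET6, "2001:db8::2", ip_spec.hdr.dst_addr);
	/* Full match on both addresses; every other IPv6 header field must
	 * stay unmasked or the parser rejects the rule.
	 */
	memset(ip_mask.hdr.src_addr, 0xFF, sizeof(ip_mask.hdr.src_addr));
	memset(ip_mask.hdr.dst_addr, 0xFF, sizeof(ip_mask.hdr.dst_addr));

	if (rte_flow_validate(port_id, &attr, pattern, actions, err))
		return NULL;

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}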