X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fixgbe%2Fixgbe_flow.c;h=1cb04f149cbe6121368c526632ee3b06a3fb9cab;hb=d69851df12b2;hp=257cd6f5424ae284da149c487c3d6c1fb0e799e5;hpb=11777435c7272870a91e8682b536579165959f7a;p=dpdk.git diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c index 257cd6f542..1cb04f149c 100644 --- a/drivers/net/ixgbe/ixgbe_flow.c +++ b/drivers/net/ixgbe/ixgbe_flow.c @@ -75,99 +75,10 @@ #include "base/ixgbe_phy.h" #include "rte_pmd_ixgbe.h" -static int ixgbe_flow_flush(struct rte_eth_dev *dev, - struct rte_flow_error *error); -static int -cons_parse_ntuple_filter(const struct rte_flow_attr *attr, - const struct rte_flow_item pattern[], - const struct rte_flow_action actions[], - struct rte_eth_ntuple_filter *filter, - struct rte_flow_error *error); -static int -ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr, - const struct rte_flow_item pattern[], - const struct rte_flow_action actions[], - struct rte_eth_ntuple_filter *filter, - struct rte_flow_error *error); -static int -cons_parse_ethertype_filter(const struct rte_flow_attr *attr, - const struct rte_flow_item *pattern, - const struct rte_flow_action *actions, - struct rte_eth_ethertype_filter *filter, - struct rte_flow_error *error); -static int -ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr, - const struct rte_flow_item pattern[], - const struct rte_flow_action actions[], - struct rte_eth_ethertype_filter *filter, - struct rte_flow_error *error); -static int -cons_parse_syn_filter(const struct rte_flow_attr *attr, - const struct rte_flow_item pattern[], - const struct rte_flow_action actions[], - struct rte_eth_syn_filter *filter, - struct rte_flow_error *error); -static int -ixgbe_parse_syn_filter(const struct rte_flow_attr *attr, - const struct rte_flow_item pattern[], - const struct rte_flow_action actions[], - struct rte_eth_syn_filter *filter, - struct rte_flow_error *error); -static int -cons_parse_l2_tn_filter(const struct rte_flow_attr *attr, - const struct rte_flow_item pattern[], - const struct rte_flow_action actions[], - struct rte_eth_l2_tunnel_conf *filter, - struct rte_flow_error *error); -static int -ixgbe_validate_l2_tn_filter(struct rte_eth_dev *dev, - const struct rte_flow_attr *attr, - const struct rte_flow_item pattern[], - const struct rte_flow_action actions[], - struct rte_eth_l2_tunnel_conf *rule, - struct rte_flow_error *error); -static int -ixgbe_validate_fdir_filter(struct rte_eth_dev *dev, - const struct rte_flow_attr *attr, - const struct rte_flow_item pattern[], - const struct rte_flow_action actions[], - struct ixgbe_fdir_rule *rule, - struct rte_flow_error *error); -static int -ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, - const struct rte_flow_item pattern[], - const struct rte_flow_action actions[], - struct ixgbe_fdir_rule *rule, - struct rte_flow_error *error); -static int -ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, - const struct rte_flow_item pattern[], - const struct rte_flow_action actions[], - struct ixgbe_fdir_rule *rule, - struct rte_flow_error *error); -static int -ixgbe_parse_fdir_filter(const struct rte_flow_attr *attr, - const struct rte_flow_item pattern[], - const struct rte_flow_action actions[], - struct ixgbe_fdir_rule *rule, - struct rte_flow_error *error); -static int -ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev, - const struct rte_flow_attr *attr, - const struct rte_flow_item pattern[], - const struct rte_flow_action actions[], 
- struct rte_flow_error *error); - -const struct rte_flow_ops ixgbe_flow_ops = { - ixgbe_flow_validate, - NULL, - NULL, - ixgbe_flow_flush, - NULL, -}; #define IXGBE_MIN_N_TUPLE_PRIO 1 #define IXGBE_MAX_N_TUPLE_PRIO 7 +#define IXGBE_MAX_FLX_SOURCE_OFF 62 #define NEXT_ITEM_OF_PATTERN(item, pattern, index)\ do { \ item = pattern + index;\ @@ -211,8 +122,8 @@ const struct rte_flow_ops ixgbe_flow_ops = { * IPV4 src_addr 192.168.1.20 0xFFFFFFFF * dst_addr 192.167.3.50 0xFFFFFFFF * next_proto_id 17 0xFF - * UDP/TCP src_port 80 0xFFFF - * dst_port 80 0xFFFF + * UDP/TCP/ src_port 80 0xFFFF + * SCTP dst_port 80 0xFFFF * END * other members in mask and spec should set to 0x00. * item->last should be NULL. @@ -232,6 +143,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, const struct rte_flow_item_tcp *tcp_mask; const struct rte_flow_item_udp *udp_spec; const struct rte_flow_item_udp *udp_mask; + const struct rte_flow_item_sctp *sctp_spec; + const struct rte_flow_item_sctp *sctp_mask; uint32_t index; if (!pattern) { @@ -343,7 +256,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, index++; NEXT_ITEM_OF_PATTERN(item, pattern, index); if (item->type != RTE_FLOW_ITEM_TYPE_TCP && - item->type != RTE_FLOW_ITEM_TYPE_UDP) { + item->type != RTE_FLOW_ITEM_TYPE_UDP && + item->type != RTE_FLOW_ITEM_TYPE_SCTP) { memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -409,7 +323,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, filter->dst_port = tcp_spec->hdr.dst_port; filter->src_port = tcp_spec->hdr.src_port; filter->tcp_flags = tcp_spec->hdr.tcp_flags; - } else { + } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) { udp_mask = (const struct rte_flow_item_udp *)item->mask; /** @@ -432,6 +346,29 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, udp_spec = (const struct rte_flow_item_udp *)item->spec; filter->dst_port = udp_spec->hdr.dst_port; filter->src_port = udp_spec->hdr.src_port; + } else { + sctp_mask = (const struct rte_flow_item_sctp *)item->mask; + + /** + * Only support src & dst ports, + * others should be masked. 
+ */ + if (sctp_mask->hdr.tag || + sctp_mask->hdr.cksum) { + memset(filter, 0, + sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + + filter->dst_port_mask = sctp_mask->hdr.dst_port; + filter->src_port_mask = sctp_mask->hdr.src_port; + + sctp_spec = (const struct rte_flow_item_sctp *)item->spec; + filter->dst_port = sctp_spec->hdr.dst_port; + filter->src_port = sctp_spec->hdr.src_port; } /* check if the next not void item is END */ @@ -510,13 +447,17 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, /* a specific function for ixgbe because the flags is specific */ static int -ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr, +ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, const struct rte_flow_item pattern[], const struct rte_flow_action actions[], struct rte_eth_ntuple_filter *filter, struct rte_flow_error *error) { int ret; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + MAC_TYPE_FILTER_SUP_EXT(hw->mac.type); ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error); @@ -758,13 +699,17 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr, } static int -ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr, +ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, const struct rte_flow_item pattern[], const struct rte_flow_action actions[], struct rte_eth_ethertype_filter *filter, struct rte_flow_error *error) { int ret; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + MAC_TYPE_FILTER_SUP(hw->mac.type); ret = cons_parse_ethertype_filter(attr, pattern, actions, filter, error); @@ -1053,13 +998,17 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, } static int -ixgbe_parse_syn_filter(const struct rte_flow_attr *attr, +ixgbe_parse_syn_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, const struct rte_flow_item pattern[], const struct rte_flow_action actions[], struct rte_eth_syn_filter *filter, struct rte_flow_error *error) { int ret; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + MAC_TYPE_FILTER_SUP(hw->mac.type); ret = cons_parse_syn_filter(attr, pattern, actions, filter, error); @@ -1242,7 +1191,7 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr, } static int -ixgbe_validate_l2_tn_filter(struct rte_eth_dev *dev, +ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item pattern[], const struct rte_flow_action actions[], @@ -1368,7 +1317,8 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr, * UDP/TCP/SCTP PATTERN: * The first not void item can be ETH or IPV4. * The second not void item must be IPV4 if the first one is ETH. - * The third not void item must be UDP or TCP or SCTP. + * The next not void item could be UDP or TCP or SCTP (optional) + * The next not void item could be RAW (for flexbyte, optional) * The next not void item must be END. * MAC VLAN PATTERN: * The first not void item must be ETH. 
@@ -1386,14 +1336,21 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr, * dst_addr 192.167.3.50 0xFFFFFFFF * UDP/TCP/SCTP src_port 80 0xFFFF * dst_port 80 0xFFFF + * FLEX relative 0 0x1 + * search 0 0x1 + * reserved 0 0 + * offset 12 0xFFFFFFFF + * limit 0 0xFFFF + * length 2 0xFFFF + * pattern[0] 0x86 0xFF + * pattern[1] 0xDD 0xFF * END * MAC VLAN pattern example: * ITEM Spec Mask * ETH dst_addr {0xAC, 0x7B, 0xA1, {0xFF, 0xFF, 0xFF, 0x2C, 0x6D, 0x36} 0xFF, 0xFF, 0xFF} - * MAC VLAN tci 0x2016 0xFFFF - * tpid 0x8100 0xFFFF + * MAC VLAN tci 0x2016 0xEFFF * END * Other members in mask and spec should set to 0x00. * Item->last should be NULL. @@ -1418,6 +1375,8 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, const struct rte_flow_item_sctp *sctp_mask; const struct rte_flow_item_vlan *vlan_spec; const struct rte_flow_item_vlan *vlan_mask; + const struct rte_flow_item_raw *raw_mask; + const struct rte_flow_item_raw *raw_spec; uint32_t index, j; @@ -1449,6 +1408,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask)); rule->mask.vlan_tci_mask = 0; + rule->mask.flex_bytes_mask = 0; /* parse pattern */ index = 0; @@ -1591,38 +1551,16 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, vlan_spec = (const struct rte_flow_item_vlan *)item->spec; vlan_mask = (const struct rte_flow_item_vlan *)item->mask; - if (vlan_spec->tpid != rte_cpu_to_be_16(ETHER_TYPE_VLAN)) { - memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, "Not supported by fdir filter"); - return -rte_errno; - } - rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci; - if (vlan_mask->tpid != (uint16_t)~0U) { - memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, "Not supported by fdir filter"); - return -rte_errno; - } rule->mask.vlan_tci_mask = vlan_mask->tci; + rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF); /* More than one tags are not supported. */ - /** - * Check if the next not void item is not vlan. 
- */
+ /* Next not void item must be END */
 index++;
 NEXT_ITEM_OF_PATTERN(item, pattern, index);
- if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
- memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item, "Not supported by fdir filter");
- return -rte_errno;
- } else if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
 rte_flow_error_set(error, EINVAL,
 RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1696,7 +1634,8 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
 item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
- item->type != RTE_FLOW_ITEM_TYPE_END) {
+ item->type != RTE_FLOW_ITEM_TYPE_END &&
+ item->type != RTE_FLOW_ITEM_TYPE_RAW) {
 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
 rte_flow_error_set(error, EINVAL,
 RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1757,6 +1696,18 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
 rule->ixgbe_fdir.formatted.dst_port =
 tcp_spec->hdr.dst_port;
 }
+
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
 }
 
 /* Get the UDP info */
@@ -1806,6 +1757,18 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
 rule->ixgbe_fdir.formatted.dst_port =
 udp_spec->hdr.dst_port;
 }
+
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
 }
 
 /* Get the SCTP info */
@@ -1857,6 +1820,88 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
 rule->ixgbe_fdir.formatted.dst_port =
 sctp_spec->hdr.dst_port;
 }
+
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Get the flex byte info */
+ if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
+ /* Not supported last point for range */
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ /* mask and spec should not be null */
+ if (!item->mask || !item->spec) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ raw_mask = (const struct rte_flow_item_raw *)item->mask;
+
+ /* check mask */
+ if (raw_mask->relative != 0x1 ||
+ raw_mask->search != 0x1 ||
+ raw_mask->reserved != 0x0 ||
+ (uint32_t)raw_mask->offset != 0xffffffff ||
+ raw_mask->limit != 0xffff ||
+ raw_mask->length != 0xffff) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ raw_spec = (const struct rte_flow_item_raw *)item->spec;
+
+ /* check spec 
*/ + if (raw_spec->relative != 0 || + raw_spec->search != 0 || + raw_spec->reserved != 0 || + raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF || + raw_spec->offset % 2 || + raw_spec->limit != 0 || + raw_spec->length != 2 || + /* pattern can't be 0xffff */ + (raw_spec->pattern[0] == 0xff && + raw_spec->pattern[1] == 0xff)) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + + /* check pattern mask */ + if (raw_mask->pattern[0] != 0xff || + raw_mask->pattern[1] != 0xff) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + + rule->mask.flex_bytes_mask = 0xffff; + rule->ixgbe_fdir.formatted.flex_bytes = + (((uint16_t)raw_spec->pattern[1]) << 8) | + raw_spec->pattern[0]; + rule->flex_bytes_offset = raw_spec->offset; } if (item->type != RTE_FLOW_ITEM_TYPE_END) { @@ -1901,6 +1946,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, * IPV4/IPV6 NULL NULL * UDP NULL NULL * VxLAN vni{0x00, 0x32, 0x54} {0xFF, 0xFF, 0xFF} + * MAC VLAN tci 0x2016 0xEFFF * END * NEGRV pattern example: * ITEM Spec Mask @@ -1908,6 +1954,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, * IPV4/IPV6 NULL NULL * NVGRE protocol 0x6558 0xFFFF * tni{0x00, 0x32, 0x54} {0xFF, 0xFF, 0xFF} + * MAC VLAN tci 0x2016 0xEFFF * END * other members in mask and spec should set to 0x00. * item->last should be NULL. @@ -1992,7 +2039,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, item, "Not supported by fdir filter"); return -rte_errno; } - /*Not supported last point for range*/ + /* Not supported last point for range*/ if (item->last) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, @@ -2124,15 +2171,16 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni, RTE_DIM(vxlan_mask->vni)); - rule->mask.tunnel_id_mask <<= 8; if (item->spec) { rule->b_spec = TRUE; vxlan_spec = (const struct rte_flow_item_vxlan *) item->spec; - rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni, + rte_memcpy(((uint8_t *) + &rule->ixgbe_fdir.formatted.tni_vni + 1), vxlan_spec->vni, RTE_DIM(vxlan_spec->vni)); - rule->ixgbe_fdir.formatted.tni_vni <<= 8; + rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32( + rule->ixgbe_fdir.formatted.tni_vni); } } @@ -2307,7 +2355,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, index++; NEXT_ITEM_OF_PATTERN(item, pattern, index); if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) && - (item->type != RTE_FLOW_ITEM_TYPE_VLAN)) { + (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -2334,47 +2382,16 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, vlan_spec = (const struct rte_flow_item_vlan *)item->spec; vlan_mask = (const struct rte_flow_item_vlan *)item->mask; - if (vlan_spec->tpid != rte_cpu_to_be_16(ETHER_TYPE_VLAN)) { - memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, "Not supported by fdir filter"); - return -rte_errno; - } - rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci; - if (vlan_mask->tpid != (uint16_t)~0U) { - memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); - rte_flow_error_set(error, EINVAL, - 
RTE_FLOW_ERROR_TYPE_ITEM, - item, "Not supported by fdir filter"); - return -rte_errno; - } rule->mask.vlan_tci_mask = vlan_mask->tci; + rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF); /* More than one tags are not supported. */ - /** - * Check if the next not void item is not vlan. - */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); - if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) { - memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, "Not supported by fdir filter"); - return -rte_errno; - } else if (item->type != RTE_FLOW_ITEM_TYPE_END) { - memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, "Not supported by fdir filter"); - return -rte_errno; - } /* check if the next not void item is END */ index++; NEXT_ITEM_OF_PATTERN(item, pattern, index); + if (item->type != RTE_FLOW_ITEM_TYPE_END) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, @@ -2393,47 +2410,294 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, } static int -ixgbe_validate_fdir_filter(struct rte_eth_dev *dev, +ixgbe_parse_fdir_filter(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item pattern[], const struct rte_flow_action actions[], struct ixgbe_fdir_rule *rule, struct rte_flow_error *error) { - int ret = 0; - + int ret; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode; - ixgbe_parse_fdir_filter(attr, pattern, actions, - rule, error); + if (hw->mac.type != ixgbe_mac_82599EB && + hw->mac.type != ixgbe_mac_X540 && + hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) + return -ENOTSUP; + + ret = ixgbe_parse_fdir_filter_normal(attr, pattern, + actions, rule, error); + if (!ret) + goto step_next; + ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern, + actions, rule, error); + +step_next: if (fdir_mode == RTE_FDIR_MODE_NONE || fdir_mode != rule->mode) return -ENOTSUP; - return ret; } -static int -ixgbe_parse_fdir_filter(const struct rte_flow_attr *attr, - const struct rte_flow_item pattern[], - const struct rte_flow_action actions[], - struct ixgbe_fdir_rule *rule, - struct rte_flow_error *error) +void +ixgbe_filterlist_flush(void) +{ + struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr; + struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr; + struct ixgbe_eth_syn_filter_ele *syn_filter_ptr; + struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr; + struct ixgbe_fdir_rule_ele *fdir_rule_ptr; + struct ixgbe_flow_mem *ixgbe_flow_mem_ptr; + + while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) { + TAILQ_REMOVE(&filter_ntuple_list, + ntuple_filter_ptr, + entries); + rte_free(ntuple_filter_ptr); + } + + while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) { + TAILQ_REMOVE(&filter_ethertype_list, + ethertype_filter_ptr, + entries); + rte_free(ethertype_filter_ptr); + } + + while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) { + TAILQ_REMOVE(&filter_syn_list, + syn_filter_ptr, + entries); + rte_free(syn_filter_ptr); + } + + while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) { + TAILQ_REMOVE(&filter_l2_tunnel_list, + l2_tn_filter_ptr, + entries); + rte_free(l2_tn_filter_ptr); + } + + while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) { + TAILQ_REMOVE(&filter_fdir_list, + fdir_rule_ptr, + entries); + 
rte_free(fdir_rule_ptr);
+ }
+
+ while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
+ TAILQ_REMOVE(&ixgbe_flow_list,
+ ixgbe_flow_mem_ptr,
+ entries);
+ rte_free(ixgbe_flow_mem_ptr->flow);
+ rte_free(ixgbe_flow_mem_ptr);
+ }
+}
+
+/**
+ * Create or destroy a flow rule.
+ * Theoretically one rule can match more than one filter.
+ * We will let it use the filter which it hits first.
+ * So, the sequence matters.
+ */
+static struct rte_flow *
+ixgbe_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
 {
 int ret;
+ struct rte_eth_ntuple_filter ntuple_filter;
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_syn_filter syn_filter;
+ struct ixgbe_fdir_rule fdir_rule;
+ struct rte_eth_l2_tunnel_conf l2_tn_filter;
+ struct ixgbe_hw_fdir_info *fdir_info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ struct rte_flow *flow = NULL;
+ struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
+ struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
+ struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
+ struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
+ struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
+ struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
+
+ flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
+ if (!flow) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return (struct rte_flow *)flow;
+ }
+ ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
+ sizeof(struct ixgbe_flow_mem), 0);
+ if (!ixgbe_flow_mem_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ rte_free(flow);
+ return NULL;
+ }
+ ixgbe_flow_mem_ptr->flow = flow;
+ TAILQ_INSERT_TAIL(&ixgbe_flow_list,
+ ixgbe_flow_mem_ptr, entries);
 
- ret = ixgbe_parse_fdir_filter_normal(attr, pattern,
- actions, rule, error);
+ memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
+ actions, &ntuple_filter, error);
+ if (!ret) {
+ ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
+ if (!ret) {
+ ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
+ sizeof(struct ixgbe_ntuple_filter_ele), 0);
+ (void)rte_memcpy(&ntuple_filter_ptr->filter_info,
+ &ntuple_filter,
+ sizeof(struct rte_eth_ntuple_filter));
+ TAILQ_INSERT_TAIL(&filter_ntuple_list,
+ ntuple_filter_ptr, entries);
+ flow->rule = ntuple_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_NTUPLE;
+ return flow;
+ }
+ goto out;
+ }
 
- if (!ret)
- return 0;
+ memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
+ actions, &ethertype_filter, error);
+ if (!ret) {
+ ret = ixgbe_add_del_ethertype_filter(dev,
+ &ethertype_filter, TRUE);
+ if (!ret) {
+ ethertype_filter_ptr = rte_zmalloc(
+ "ixgbe_ethertype_filter",
+ sizeof(struct ixgbe_ethertype_filter_ele), 0);
+ (void)rte_memcpy(&ethertype_filter_ptr->filter_info,
+ &ethertype_filter,
+ sizeof(struct rte_eth_ethertype_filter));
+ TAILQ_INSERT_TAIL(&filter_ethertype_list,
+ ethertype_filter_ptr, entries);
+ flow->rule = ethertype_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
+ return flow;
+ }
+ goto out;
+ }
 
- ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
- actions, rule, error);
+ memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
+ ret = ixgbe_parse_syn_filter(dev, attr, pattern,
+ actions, &syn_filter, error);
+ if (!ret) {
+ ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
+ if (!ret) {
+ 
syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter", + sizeof(struct ixgbe_eth_syn_filter_ele), 0); + (void)rte_memcpy(&syn_filter_ptr->filter_info, + &syn_filter, + sizeof(struct rte_eth_syn_filter)); + TAILQ_INSERT_TAIL(&filter_syn_list, + syn_filter_ptr, + entries); + flow->rule = syn_filter_ptr; + flow->filter_type = RTE_ETH_FILTER_SYN; + return flow; + } + goto out; + } - return ret; + memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule)); + ret = ixgbe_parse_fdir_filter(dev, attr, pattern, + actions, &fdir_rule, error); + if (!ret) { + /* A mask cannot be deleted. */ + if (fdir_rule.b_mask) { + if (!fdir_info->mask_added) { + /* It's the first time the mask is set. */ + rte_memcpy(&fdir_info->mask, + &fdir_rule.mask, + sizeof(struct ixgbe_hw_fdir_mask)); + fdir_info->flex_bytes_offset = + fdir_rule.flex_bytes_offset; + + if (fdir_rule.mask.flex_bytes_mask) + ixgbe_fdir_set_flexbytes_offset(dev, + fdir_rule.flex_bytes_offset); + + ret = ixgbe_fdir_set_input_mask(dev); + if (ret) + goto out; + + fdir_info->mask_added = TRUE; + } else { + /** + * Only support one global mask, + * all the masks should be the same. + */ + ret = memcmp(&fdir_info->mask, + &fdir_rule.mask, + sizeof(struct ixgbe_hw_fdir_mask)); + if (ret) + goto out; + + if (fdir_info->flex_bytes_offset != + fdir_rule.flex_bytes_offset) + goto out; + } + } + + if (fdir_rule.b_spec) { + ret = ixgbe_fdir_filter_program(dev, &fdir_rule, + FALSE, FALSE); + if (!ret) { + fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter", + sizeof(struct ixgbe_fdir_rule_ele), 0); + (void)rte_memcpy(&fdir_rule_ptr->filter_info, + &fdir_rule, + sizeof(struct ixgbe_fdir_rule)); + TAILQ_INSERT_TAIL(&filter_fdir_list, + fdir_rule_ptr, entries); + flow->rule = fdir_rule_ptr; + flow->filter_type = RTE_ETH_FILTER_FDIR; + + return flow; + } + + if (ret) + goto out; + } + + goto out; + } + + memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); + ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern, + actions, &l2_tn_filter, error); + if (!ret) { + ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE); + if (!ret) { + l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter", + sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0); + (void)rte_memcpy(&l2_tn_filter_ptr->filter_info, + &l2_tn_filter, + sizeof(struct rte_eth_l2_tunnel_conf)); + TAILQ_INSERT_TAIL(&filter_l2_tunnel_list, + l2_tn_filter_ptr, entries); + flow->rule = l2_tn_filter_ptr; + flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL; + return flow; + } + } + +out: + TAILQ_REMOVE(&ixgbe_flow_list, + ixgbe_flow_mem_ptr, entries); + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to create flow."); + rte_free(ixgbe_flow_mem_ptr); + rte_free(flow); + return NULL; } /** @@ -2442,7 +2706,7 @@ ixgbe_parse_fdir_filter(const struct rte_flow_attr *attr, * the HW. Because there can be no enough room for the rule. 
*/
 static int
-ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
+ixgbe_flow_validate(struct rte_eth_dev *dev,
 const struct rte_flow_attr *attr,
 const struct rte_flow_item pattern[],
 const struct rte_flow_action actions[],
@@ -2456,36 +2720,152 @@ ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
 int ret;
 
 memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
- ret = ixgbe_parse_ntuple_filter(attr, pattern,
+ ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
 actions, &ntuple_filter, error);
 if (!ret)
 return 0;
 
 memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
- ret = ixgbe_parse_ethertype_filter(attr, pattern,
+ ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
 actions, &ethertype_filter, error);
 if (!ret)
 return 0;
 
 memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
- ret = ixgbe_parse_syn_filter(attr, pattern,
+ ret = ixgbe_parse_syn_filter(dev, attr, pattern,
 actions, &syn_filter, error);
 if (!ret)
 return 0;
 
 memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
- ret = ixgbe_validate_fdir_filter(dev, attr, pattern,
+ ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
 actions, &fdir_rule, error);
 if (!ret)
 return 0;
 
 memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
- ret = ixgbe_validate_l2_tn_filter(dev, attr, pattern,
+ ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
 actions, &l2_tn_filter, error);
 
 return ret;
 }
 
+/* Destroy a flow rule on ixgbe. */
+static int
+ixgbe_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct rte_flow *pmd_flow = flow;
+ enum rte_filter_type filter_type = pmd_flow->filter_type;
+ struct rte_eth_ntuple_filter ntuple_filter;
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_syn_filter syn_filter;
+ struct ixgbe_fdir_rule fdir_rule;
+ struct rte_eth_l2_tunnel_conf l2_tn_filter;
+ struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
+ struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
+ struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
+ struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
+ struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
+ struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
+ struct ixgbe_hw_fdir_info *fdir_info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_NTUPLE:
+ ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
+ pmd_flow->rule;
+ (void)rte_memcpy(&ntuple_filter,
+ &ntuple_filter_ptr->filter_info,
+ sizeof(struct rte_eth_ntuple_filter));
+ ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_ntuple_list,
+ ntuple_filter_ptr, entries);
+ rte_free(ntuple_filter_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
+ pmd_flow->rule;
+ (void)rte_memcpy(&ethertype_filter,
+ &ethertype_filter_ptr->filter_info,
+ sizeof(struct rte_eth_ethertype_filter));
+ ret = ixgbe_add_del_ethertype_filter(dev,
+ &ethertype_filter, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_ethertype_list,
+ ethertype_filter_ptr, entries);
+ rte_free(ethertype_filter_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_SYN:
+ syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
+ pmd_flow->rule;
+ (void)rte_memcpy(&syn_filter,
+ &syn_filter_ptr->filter_info,
+ sizeof(struct rte_eth_syn_filter));
+ ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_syn_list,
+ syn_filter_ptr, entries);
+ rte_free(syn_filter_ptr);
+ }
+ break;
+ case 
RTE_ETH_FILTER_FDIR: + fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule; + (void)rte_memcpy(&fdir_rule, + &fdir_rule_ptr->filter_info, + sizeof(struct ixgbe_fdir_rule)); + ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE); + if (!ret) { + TAILQ_REMOVE(&filter_fdir_list, + fdir_rule_ptr, entries); + rte_free(fdir_rule_ptr); + if (TAILQ_EMPTY(&filter_fdir_list)) + fdir_info->mask_added = false; + } + break; + case RTE_ETH_FILTER_L2_TUNNEL: + l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *) + pmd_flow->rule; + (void)rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info, + sizeof(struct rte_eth_l2_tunnel_conf)); + ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter); + if (!ret) { + TAILQ_REMOVE(&filter_l2_tunnel_list, + l2_tn_filter_ptr, entries); + rte_free(l2_tn_filter_ptr); + } + break; + default: + PMD_DRV_LOG(WARNING, "Filter type (%d) not supported", + filter_type); + ret = -EINVAL; + break; + } + + if (ret) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Failed to destroy flow"); + return ret; + } + + TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) { + if (ixgbe_flow_mem_ptr->flow == pmd_flow) { + TAILQ_REMOVE(&ixgbe_flow_list, + ixgbe_flow_mem_ptr, entries); + rte_free(ixgbe_flow_mem_ptr); + } + } + rte_free(flow); + + return ret; +} + /* Destroy all flow rules associated with a port on ixgbe. */ static int ixgbe_flow_flush(struct rte_eth_dev *dev, @@ -2511,5 +2891,15 @@ ixgbe_flow_flush(struct rte_eth_dev *dev, return ret; } + ixgbe_filterlist_flush(); + return 0; } + +const struct rte_flow_ops ixgbe_flow_ops = { + ixgbe_flow_validate, + ixgbe_flow_create, + ixgbe_flow_destroy, + ixgbe_flow_flush, + NULL, +};
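--
Reviewer note, not part of the patch itself: below is a minimal sketch of how an
application could exercise the new SCTP n-tuple path through the generic flow API
once this change is applied. The port id, queue index, addresses, ports and the
helper name are placeholders; device setup and error handling are omitted. The
item layout simply follows what cons_parse_ntuple_filter() enforces above: IPv4
addresses, the L4 protocol and the SCTP ports fully masked, every other field
zero, ingress only, priority within 1..7.

#include <rte_byteorder.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

/* Steer SCTP 192.168.1.20:80 -> 192.167.3.50:80 to one RX queue. */
static struct rte_flow *
create_sctp_ntuple_rule(uint8_t port_id, uint16_t rx_queue,
			struct rte_flow_error *error)
{
	/* n-tuple rules are ingress only; priority must stay within
	 * IXGBE_MIN_N_TUPLE_PRIO..IXGBE_MAX_N_TUPLE_PRIO (1..7).
	 */
	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };

	/* IPv4: addresses and L4 protocol fully masked, all other
	 * header fields left zero as the parser requires.
	 */
	struct rte_flow_item_ipv4 ip_spec = { .hdr = {
		.src_addr = rte_cpu_to_be_32(0xC0A80114), /* 192.168.1.20 */
		.dst_addr = rte_cpu_to_be_32(0xC0A70332), /* 192.167.3.50 */
		.next_proto_id = 132, /* IPPROTO_SCTP */
	} };
	struct rte_flow_item_ipv4 ip_mask = { .hdr = {
		.src_addr = 0xFFFFFFFF,
		.dst_addr = 0xFFFFFFFF,
		.next_proto_id = 0xFF,
	} };

	/* SCTP: only the ports may be set; tag and cksum stay masked out. */
	struct rte_flow_item_sctp sctp_spec = { .hdr = {
		.src_port = rte_cpu_to_be_16(80),
		.dst_port = rte_cpu_to_be_16(80),
	} };
	struct rte_flow_item_sctp sctp_mask = { .hdr = {
		.src_port = 0xFFFF,
		.dst_port = 0xFFFF,
	} };

	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_SCTP,
		  .spec = &sctp_spec, .mask = &sctp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};

	/* The only action the n-tuple path accepts: QUEUE, then END. */
	struct rte_flow_action_queue queue = { .index = rx_queue };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}

The new RAW/flexbyte fdir path is driven the same way: append a RAW item whose
mask sets relative, search, offset, limit and length to all ones, and whose spec
carries a 2-byte pattern at an even offset no greater than
IXGBE_MAX_FLX_SOURCE_OFF (62), exactly as ixgbe_parse_fdir_filter_normal()
validates above.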