diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index f31aebda70..abdeac28b5 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -25,7 +25,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
 #include
@@ -74,6 +74,11 @@ struct ixgbe_eth_l2_tunnel_conf_ele {
         TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
         struct rte_eth_l2_tunnel_conf filter_info;
 };
+/* rss filter list structure */
+struct ixgbe_rss_conf_ele {
+        TAILQ_ENTRY(ixgbe_rss_conf_ele) entries;
+        struct ixgbe_rte_flow_rss_conf filter_info;
+};
 /* ixgbe_flow memory list structure */
 struct ixgbe_flow_mem {
         TAILQ_ENTRY(ixgbe_flow_mem) entries;
@@ -85,6 +90,7 @@ TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
 TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
 TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
 TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
+TAILQ_HEAD(ixgbe_rss_filter_list, ixgbe_rss_conf_ele);
 TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);
 
 static struct ixgbe_ntuple_filter_list filter_ntuple_list;
@@ -92,6 +98,7 @@ static struct ixgbe_ethertype_filter_list filter_ethertype_list;
 static struct ixgbe_syn_filter_list filter_syn_list;
 static struct ixgbe_fdir_rule_filter_list filter_fdir_list;
 static struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
+static struct ixgbe_rss_filter_list filter_rss_list;
 static struct ixgbe_flow_mem_list ixgbe_flow_list;
 
 /**
@@ -257,8 +264,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
         }
         /* Skip Ethernet */
         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
-                eth_spec = (const struct rte_flow_item_eth *)item->spec;
-                eth_mask = (const struct rte_flow_item_eth *)item->mask;
+                eth_spec = item->spec;
+                eth_mask = item->mask;
                 /*Not supported last point for range*/
                 if (item->last) {
                         rte_flow_error_set(error,
@@ -291,8 +298,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
         }
 
         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
-                vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
-                vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+                vlan_spec = item->spec;
+                vlan_mask = item->mask;
                 /*Not supported last point for range*/
                 if (item->last) {
                         rte_flow_error_set(error,
@@ -339,7 +346,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
                         return -rte_errno;
                 }
 
-                ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
+                ipv4_mask = item->mask;
                 /**
                  * Only support src & dst addresses, protocol,
                  * others should be masked.
@@ -361,7 +368,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
         filter->src_ip_mask = ipv4_mask->hdr.src_addr;
         filter->proto_mask = ipv4_mask->hdr.next_proto_id;
 
-                ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
+                ipv4_spec = item->spec;
                 filter->dst_ip = ipv4_spec->hdr.dst_addr;
                 filter->src_ip = ipv4_spec->hdr.src_addr;
                 filter->proto = ipv4_spec->hdr.next_proto_id;
@@ -406,7 +413,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
         }
 
         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
-                tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+                tcp_mask = item->mask;
 
                 /**
                  * Only support src & dst ports, tcp flags,
@@ -440,12 +447,12 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
                         return -rte_errno;
                 }
 
-                tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+                tcp_spec = item->spec;
                 filter->dst_port = tcp_spec->hdr.dst_port;
                 filter->src_port = tcp_spec->hdr.src_port;
                 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
         } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
-                udp_mask = (const struct rte_flow_item_udp *)item->mask;
+                udp_mask = item->mask;
 
                 /**
                  * Only support src & dst ports,
@@ -464,11 +471,11 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
                 filter->dst_port_mask = udp_mask->hdr.dst_port;
                 filter->src_port_mask = udp_mask->hdr.src_port;
 
-                udp_spec = (const struct rte_flow_item_udp *)item->spec;
+                udp_spec = item->spec;
                 filter->dst_port = udp_spec->hdr.dst_port;
                 filter->src_port = udp_spec->hdr.src_port;
         } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
-                sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
+                sctp_mask = item->mask;
 
                 /**
                  * Only support src & dst ports,
@@ -487,7 +494,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
                 filter->dst_port_mask = sctp_mask->hdr.dst_port;
                 filter->src_port_mask = sctp_mask->hdr.src_port;
 
-                sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
+                sctp_spec = item->spec;
                 filter->dst_port = sctp_spec->hdr.dst_port;
                 filter->src_port = sctp_spec->hdr.src_port;
         } else {
@@ -692,8 +699,8 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
                 return -rte_errno;
         }
 
-        eth_spec = (const struct rte_flow_item_eth *)item->spec;
-        eth_mask = (const struct rte_flow_item_eth *)item->mask;
+        eth_spec = item->spec;
+        eth_mask = item->mask;
 
         /* Mask bits of source MAC address must be full of 0.
          * Mask bits of destination MAC address must be full
@@ -993,8 +1000,8 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
                 return -rte_errno;
         }
 
-        tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
-        tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+        tcp_spec = item->spec;
+        tcp_mask = item->mask;
         if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
             tcp_mask->hdr.src_port ||
             tcp_mask->hdr.dst_port ||
@@ -1191,8 +1198,8 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
                 return -rte_errno;
         }
 
-        e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
-        e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;
+        e_tag_spec = item->spec;
+        e_tag_mask = item->mask;
 
         /* Only care about GRP and E cid base. */
         if (e_tag_mask->epcp_edei_in_ecid_b ||
@@ -1440,12 +1447,9 @@ static inline uint8_t signature_match(const struct rte_flow_item pattern[])
                         break;
 
                 if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
-                        spec =
-                        (const struct rte_flow_item_fuzzy *)item->spec;
-                        last =
-                        (const struct rte_flow_item_fuzzy *)item->last;
-                        mask =
-                        (const struct rte_flow_item_fuzzy *)item->mask;
+                        spec = item->spec;
+                        last = item->last;
+                        mask = item->mask;
 
                         if (!spec || !mask)
                                 return 0;
@@ -1625,7 +1629,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 
                 if (item->spec) {
                         rule->b_spec = TRUE;
-                        eth_spec = (const struct rte_flow_item_eth *)item->spec;
+                        eth_spec = item->spec;
 
                         /* Get the dst MAC. */
                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
@@ -1638,7 +1642,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 
                 if (item->mask) {
                         rule->b_mask = TRUE;
-                        eth_mask = (const struct rte_flow_item_eth *)item->mask;
+                        eth_mask = item->mask;
 
                         /* Ether type should be masked. */
                         if (eth_mask->type ||
@@ -1718,8 +1722,8 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
                         return -rte_errno;
                 }
 
-                vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
-                vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+                vlan_spec = item->spec;
+                vlan_mask = item->mask;
 
                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
@@ -1765,8 +1769,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
                                 return -rte_errno;
                         }
                         rule->b_mask = TRUE;
-                        ipv4_mask =
-                                (const struct rte_flow_item_ipv4 *)item->mask;
+                        ipv4_mask = item->mask;
                         if (ipv4_mask->hdr.version_ihl ||
                             ipv4_mask->hdr.type_of_service ||
                             ipv4_mask->hdr.total_length ||
@@ -1786,8 +1789,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 
                 if (item->spec) {
                         rule->b_spec = TRUE;
-                        ipv4_spec =
-                                (const struct rte_flow_item_ipv4 *)item->spec;
+                        ipv4_spec = item->spec;
                         rule->ixgbe_fdir.formatted.dst_ip[0] =
                                 ipv4_spec->hdr.dst_addr;
                         rule->ixgbe_fdir.formatted.src_ip[0] =
@@ -1837,8 +1839,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
                 }
 
                 rule->b_mask = TRUE;
-                ipv6_mask =
-                        (const struct rte_flow_item_ipv6 *)item->mask;
+                ipv6_mask = item->mask;
                 if (ipv6_mask->hdr.vtc_flow ||
                     ipv6_mask->hdr.payload_len ||
                     ipv6_mask->hdr.proto ||
@@ -1878,8 +1879,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 
                 if (item->spec) {
                         rule->b_spec = TRUE;
-                        ipv6_spec =
-                                (const struct rte_flow_item_ipv6 *)item->spec;
+                        ipv6_spec = item->spec;
                         rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
                                    ipv6_spec->hdr.src_addr, 16);
                         rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
@@ -1931,7 +1931,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
                         return -rte_errno;
                 }
                 rule->b_mask = TRUE;
-                tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+                tcp_mask = item->mask;
                 if (tcp_mask->hdr.sent_seq ||
                     tcp_mask->hdr.recv_ack ||
                     tcp_mask->hdr.data_off ||
@@ -1950,7 +1950,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 
                 if (item->spec) {
                         rule->b_spec = TRUE;
-                        tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+                        tcp_spec = item->spec;
                         rule->ixgbe_fdir.formatted.src_port =
                                 tcp_spec->hdr.src_port;
                         rule->ixgbe_fdir.formatted.dst_port =
@@ -1996,7 +1996,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
                         return -rte_errno;
                 }
                 rule->b_mask = TRUE;
-                udp_mask = (const struct rte_flow_item_udp *)item->mask;
+                udp_mask = item->mask;
                 if (udp_mask->hdr.dgram_len ||
                     udp_mask->hdr.dgram_cksum) {
                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -2010,7 +2010,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 
                 if (item->spec) {
                         rule->b_spec = TRUE;
-                        udp_spec = (const struct rte_flow_item_udp *)item->spec;
+                        udp_spec = item->spec;
                         rule->ixgbe_fdir.formatted.src_port =
                                 udp_spec->hdr.src_port;
                         rule->ixgbe_fdir.formatted.dst_port =
@@ -2061,8 +2061,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
                         return -rte_errno;
                 }
                 rule->b_mask = TRUE;
-                sctp_mask =
-                        (const struct rte_flow_item_sctp *)item->mask;
+                sctp_mask = item->mask;
                 if (sctp_mask->hdr.tag ||
                     sctp_mask->hdr.cksum) {
                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -2076,8 +2075,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 
                 if (item->spec) {
                         rule->b_spec = TRUE;
-                        sctp_spec =
-                                (const struct rte_flow_item_sctp *)item->spec;
+                        sctp_spec = item->spec;
                         rule->ixgbe_fdir.formatted.src_port =
                                 sctp_spec->hdr.src_port;
                         rule->ixgbe_fdir.formatted.dst_port =
@@ -2085,8 +2083,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
                 }
                 /* others even sctp port is not supported */
         } else {
-                sctp_mask =
-                        (const struct rte_flow_item_sctp *)item->mask;
+                sctp_mask = item->mask;
                 if (sctp_mask &&
                     (sctp_mask->hdr.src_port ||
                      sctp_mask->hdr.dst_port ||
@@ -2129,7 +2126,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
                         return -rte_errno;
                 }
 
-                raw_mask = (const struct rte_flow_item_raw *)item->mask;
+                raw_mask = item->mask;
 
                 /* check mask */
                 if (raw_mask->relative != 0x1 ||
@@ -2145,7 +2142,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
                         return -rte_errno;
                 }
 
-                raw_spec = (const struct rte_flow_item_raw *)item->spec;
+                raw_spec = item->spec;
 
                 /* check spec */
                 if (raw_spec->relative != 0 ||
@@ -2418,8 +2415,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
                 /* Tunnel type is always meaningful. */
                 rule->mask.tunnel_type_mask = 1;
 
-                vxlan_mask =
-                        (const struct rte_flow_item_vxlan *)item->mask;
+                vxlan_mask = item->mask;
                 if (vxlan_mask->flags) {
                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
                         rte_flow_error_set(error, EINVAL,
@@ -2445,8 +2441,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 
                 if (item->spec) {
                         rule->b_spec = TRUE;
-                        vxlan_spec = (const struct rte_flow_item_vxlan *)
-                                        item->spec;
+                        vxlan_spec = item->spec;
                         rte_memcpy(((uint8_t *)
                                 &rule->ixgbe_fdir.formatted.tni_vni + 1),
                                 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
@@ -2483,8 +2478,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
                 /* Tunnel type is always meaningful. */
                 rule->mask.tunnel_type_mask = 1;
 
-                nvgre_mask =
-                        (const struct rte_flow_item_nvgre *)item->mask;
+                nvgre_mask = item->mask;
                 if (nvgre_mask->flow_id) {
                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
                         rte_flow_error_set(error, EINVAL,
@@ -2492,8 +2486,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
                                 item, "Not supported by fdir filter");
                         return -rte_errno;
                 }
-                if (nvgre_mask->c_k_s_rsvd0_ver !=
-                        rte_cpu_to_be_16(0x3000) ||
+                if (nvgre_mask->protocol &&
                     nvgre_mask->protocol != 0xFFFF) {
                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
                         rte_flow_error_set(error, EINVAL,
@@ -2501,6 +2494,15 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
                                 item, "Not supported by fdir filter");
                         return -rte_errno;
                 }
+                if (nvgre_mask->c_k_s_rsvd0_ver &&
+                    nvgre_mask->c_k_s_rsvd0_ver !=
+                        rte_cpu_to_be_16(0xFFFF)) {
+                        memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                        rte_flow_error_set(error, EINVAL,
+                                RTE_FLOW_ERROR_TYPE_ITEM,
+                                item, "Not supported by fdir filter");
+                        return -rte_errno;
+                }
                 /* TNI must be totally masked or not. */
                 if (nvgre_mask->tni[0] &&
                     ((nvgre_mask->tni[0] != 0xFF) ||
@@ -2519,10 +2521,17 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 
                 if (item->spec) {
                         rule->b_spec = TRUE;
-                        nvgre_spec =
-                                (const struct rte_flow_item_nvgre *)item->spec;
+                        nvgre_spec = item->spec;
                         if (nvgre_spec->c_k_s_rsvd0_ver !=
-                                rte_cpu_to_be_16(0x2000) ||
+                            rte_cpu_to_be_16(0x2000) &&
+                            nvgre_mask->c_k_s_rsvd0_ver) {
+                                memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                                rte_flow_error_set(error, EINVAL,
+                                        RTE_FLOW_ERROR_TYPE_ITEM,
+                                        item, "Not supported by fdir filter");
+                                return -rte_errno;
+                        }
+                        if (nvgre_mask->protocol &&
                             nvgre_spec->protocol !=
                             rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -2568,7 +2577,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
                         return -rte_errno;
                 }
                 rule->b_mask = TRUE;
-                eth_mask = (const struct rte_flow_item_eth *)item->mask;
+                eth_mask = item->mask;
 
                 /* Ether type should be masked. */
                 if (eth_mask->type) {
@@ -2609,7 +2618,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 
                 if (item->spec) {
                         rule->b_spec = TRUE;
-                        eth_spec = (const struct rte_flow_item_eth *)item->spec;
+                        eth_spec = item->spec;
 
                         /* Get the dst MAC. */
                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
@@ -2648,8 +2657,8 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
                         return -rte_errno;
                 }
 
-                vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
-                vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+                vlan_spec = item->spec;
+                vlan_mask = item->mask;
 
                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
@@ -2726,6 +2735,109 @@ step_next:
         return ret;
 }
 
+static int
+ixgbe_parse_rss_filter(struct rte_eth_dev *dev,
+                        const struct rte_flow_attr *attr,
+                        const struct rte_flow_action actions[],
+                        struct ixgbe_rte_flow_rss_conf *rss_conf,
+                        struct rte_flow_error *error)
+{
+        const struct rte_flow_action *act;
+        const struct rte_flow_action_rss *rss;
+        uint16_t n;
+
+        /**
+         * rss only supports forwarding,
+         * check if the first not void action is RSS.
+         */
+        act = next_no_void_action(actions, NULL);
+        if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
+                memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+                rte_flow_error_set(error, EINVAL,
+                        RTE_FLOW_ERROR_TYPE_ACTION,
+                        act, "Not supported action.");
+                return -rte_errno;
+        }
+
+        rss = (const struct rte_flow_action_rss *)act->conf;
+
+        if (!rss || !rss->num) {
+                rte_flow_error_set(error, EINVAL,
+                                RTE_FLOW_ERROR_TYPE_ACTION,
+                                act,
+                                "no valid queues");
+                return -rte_errno;
+        }
+
+        for (n = 0; n < rss->num; n++) {
+                if (rss->queue[n] >= dev->data->nb_rx_queues) {
+                        rte_flow_error_set(error, EINVAL,
+                                RTE_FLOW_ERROR_TYPE_ACTION,
+                                act,
+                                "queue id > max number of queues");
+                        return -rte_errno;
+                }
+        }
+        if (rss->rss_conf)
+                rss_conf->rss_conf = *rss->rss_conf;
+        else
+                rss_conf->rss_conf.rss_hf = IXGBE_RSS_OFFLOAD_ALL;
+
+        for (n = 0; n < rss->num; ++n)
+                rss_conf->queue[n] = rss->queue[n];
+        rss_conf->num = rss->num;
+
+        /* check if the next not void item is END */
+        act = next_no_void_action(actions, act);
+        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+                memset(rss_conf, 0, sizeof(struct rte_eth_rss_conf));
+                rte_flow_error_set(error, EINVAL,
+                        RTE_FLOW_ERROR_TYPE_ACTION,
+                        act, "Not supported action.");
+                return -rte_errno;
+        }
+
+        /* parse attr */
+        /* must be input direction */
+        if (!attr->ingress) {
+                memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+                rte_flow_error_set(error, EINVAL,
+                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+                                   attr, "Only support ingress.");
+                return -rte_errno;
+        }
+
+        /* not supported */
+        if (attr->egress) {
+                memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+                rte_flow_error_set(error, EINVAL,
+                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+                                   attr, "Not support egress.");
+                return -rte_errno;
+        }
+
+        if (attr->priority > 0xFFFF) {
+                memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+                rte_flow_error_set(error, EINVAL,
+                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+                                   attr, "Error priority.");
+                return -rte_errno;
+        }
+
+        return 0;
+}
+
+/* remove the rss filter */
+static void
+ixgbe_clear_rss_filter(struct rte_eth_dev *dev)
+{
+        struct ixgbe_filter_info *filter_info =
+                IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+
+        if (filter_info->rss_info.num)
+                ixgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE);
+}
+
 void
 ixgbe_filterlist_init(void)
 {
@@ -2734,6 +2846,7 @@ ixgbe_filterlist_init(void)
         TAILQ_INIT(&filter_syn_list);
         TAILQ_INIT(&filter_fdir_list);
         TAILQ_INIT(&filter_l2_tunnel_list);
+        TAILQ_INIT(&filter_rss_list);
         TAILQ_INIT(&ixgbe_flow_list);
 }
 
@@ -2746,6 +2859,7 @@ ixgbe_filterlist_flush(void)
         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
+        struct ixgbe_rss_conf_ele *rss_filter_ptr;
 
         while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
                 TAILQ_REMOVE(&filter_ntuple_list,
@@ -2782,6 +2896,13 @@ ixgbe_filterlist_flush(void)
                 rte_free(fdir_rule_ptr);
         }
 
+        while ((rss_filter_ptr = TAILQ_FIRST(&filter_rss_list))) {
+                TAILQ_REMOVE(&filter_rss_list,
+                                 rss_filter_ptr,
+                                 entries);
+                rte_free(rss_filter_ptr);
+        }
+
         while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
                 TAILQ_REMOVE(&ixgbe_flow_list,
                                 ixgbe_flow_mem_ptr,
@@ -2812,12 +2933,14 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
         struct rte_eth_l2_tunnel_conf l2_tn_filter;
         struct ixgbe_hw_fdir_info *fdir_info =
                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+        struct ixgbe_rte_flow_rss_conf rss_conf;
         struct rte_flow *flow = NULL;
         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
+        struct ixgbe_rss_conf_ele *rss_filter_ptr;
         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
         uint8_t first_mask = FALSE;
@@ -3018,6 +3141,29 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
                 }
         }
 
+        memset(&rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+        ret = ixgbe_parse_rss_filter(dev, attr,
+                                        actions, &rss_conf, error);
+        if (!ret) {
+                ret = ixgbe_config_rss_filter(dev, &rss_conf, TRUE);
+                if (!ret) {
+                        rss_filter_ptr = rte_zmalloc("ixgbe_rss_filter",
+                                sizeof(struct ixgbe_rss_conf_ele), 0);
+                        if (!rss_filter_ptr) {
+                                PMD_DRV_LOG(ERR, "failed to allocate memory");
+                                goto out;
+                        }
+                        rte_memcpy(&rss_filter_ptr->filter_info,
+                                &rss_conf,
+                                sizeof(struct ixgbe_rte_flow_rss_conf));
+                        TAILQ_INSERT_TAIL(&filter_rss_list,
+                                rss_filter_ptr, entries);
+                        flow->rule = rss_filter_ptr;
+                        flow->filter_type = RTE_ETH_FILTER_HASH;
+                        return flow;
+                }
+        }
+
 out:
         TAILQ_REMOVE(&ixgbe_flow_list,
                 ixgbe_flow_mem_ptr, entries);
@@ -3046,6 +3192,7 @@ ixgbe_flow_validate(struct rte_eth_dev *dev,
         struct rte_eth_syn_filter syn_filter;
         struct rte_eth_l2_tunnel_conf l2_tn_filter;
         struct ixgbe_fdir_rule fdir_rule;
+        struct ixgbe_rte_flow_rss_conf rss_conf;
         int ret;
 
         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
@@ -3075,6 +3222,12 @@ ixgbe_flow_validate(struct rte_eth_dev *dev,
         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
                                 actions, &l2_tn_filter, error);
+        if (!ret)
+                return 0;
+
+        memset(&rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+        ret = ixgbe_parse_rss_filter(dev, attr,
+                                        actions, &rss_conf, error);
 
         return ret;
 }
@@ -3101,6 +3254,7 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev,
         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
         struct ixgbe_hw_fdir_info *fdir_info =
                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+        struct ixgbe_rss_conf_ele *rss_filter_ptr;
 
         switch (filter_type) {
         case RTE_ETH_FILTER_NTUPLE:
@@ -3169,6 +3323,17 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev,
                         rte_free(l2_tn_filter_ptr);
                 }
                 break;
+        case RTE_ETH_FILTER_HASH:
+                rss_filter_ptr = (struct ixgbe_rss_conf_ele *)
+                        pmd_flow->rule;
+                ret = ixgbe_config_rss_filter(dev,
+                        &rss_filter_ptr->filter_info, FALSE);
+                if (!ret) {
+                        TAILQ_REMOVE(&filter_rss_list,
+                                rss_filter_ptr, entries);
+                        rte_free(rss_filter_ptr);
+                }
+                break;
         default:
                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
                             filter_type);
@@ -3220,6 +3385,8 @@ ixgbe_flow_flush(struct rte_eth_dev *dev,
                 return ret;
         }
 
+        ixgbe_clear_rss_filter(dev);
+
         ixgbe_filterlist_flush();
 
         return 0;
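
---

For reference, nothing ixgbe-specific is exposed to applications by this patch: the new path is reached through the generic rte_flow API, with an empty pattern and a single RSS action. The following is a minimal sketch of such a caller, assuming the pre-18.05 rte_flow_action_rss layout (rss_conf / num / queue[]) that ixgbe_parse_rss_filter() reads above; setup_rss_flow() and the four-queue spread are illustrative, not part of the patch.

#include <stdlib.h>

#include <rte_flow.h>

/* Spread all ingress traffic on a port over queues 0-3 with an RSS flow
 * rule (hypothetical helper, for illustration only).
 */
static struct rte_flow *
setup_rss_flow(uint16_t port_id, struct rte_flow_error *err)
{
        const struct rte_flow_attr attr = { .ingress = 1 };
        /* ixgbe_parse_rss_filter() only inspects attr and actions,
         * so an empty pattern is enough.
         */
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_rss *rss;
        struct rte_flow *flow;
        uint16_t q;

        /* queue[] is a flexible array member, so size the action at run time */
        rss = calloc(1, sizeof(*rss) + 4 * sizeof(rss->queue[0]));
        if (rss == NULL)
                return NULL;
        rss->rss_conf = NULL;   /* NULL falls back to IXGBE_RSS_OFFLOAD_ALL */
        rss->num = 4;           /* each id must be < dev->data->nb_rx_queues */
        for (q = 0; q < rss->num; q++)
                rss->queue[q] = q;

        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = rss },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        flow = rte_flow_create(port_id, &attr, pattern, actions, err);
        free(rss);      /* the PMD copies the queue list and hash config */
        return flow;
}

Since ixgbe_config_rss_filter() caches the accepted configuration in filter_info->rss_info, the rule persists in driver state until rte_flow_destroy() or rte_flow_flush(), which is exactly what the new RTE_ETH_FILTER_HASH case and ixgbe_clear_rss_filter() above tear down.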