diff --git a/drivers/net/e1000/igb_flow.c b/drivers/net/e1000/igb_flow.c
index 76c0c520f8..43fef889b5 100644
--- a/drivers/net/e1000/igb_flow.c
+++ b/drivers/net/e1000/igb_flow.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
  */
 
 #include
@@ -44,10 +15,9 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -79,6 +49,13 @@
 
 #define IGB_FLEX_RAW_NUM 12
 
+struct igb_flow_mem_list igb_flow_list;
+struct igb_ntuple_filter_list igb_filter_ntuple_list;
+struct igb_ethertype_filter_list igb_filter_ethertype_list;
+struct igb_syn_filter_list igb_filter_syn_list;
+struct igb_flex_filter_list igb_filter_flex_list;
+struct igb_rss_filter_list igb_filter_rss_list;
+
 /**
  * Please be aware there's an assumption for all the parsers.
  * rte_flow_item is using big endian, rte_flow_attr and
@@ -205,7 +182,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
-	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
+	ipv4_mask = item->mask;
 	/**
 	 * Only support src & dst addresses, protocol,
 	 * others should be masked.
@@ -228,7 +205,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, filter->src_ip_mask = ipv4_mask->hdr.src_addr; filter->proto_mask = ipv4_mask->hdr.next_proto_id; - ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec; + ipv4_spec = item->spec; filter->dst_ip = ipv4_spec->hdr.dst_addr; filter->src_ip = ipv4_spec->hdr.src_addr; filter->proto = ipv4_spec->hdr.next_proto_id; @@ -258,7 +235,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, /* get the TCP/UDP/SCTP info */ if (item->type == RTE_FLOW_ITEM_TYPE_TCP) { if (item->spec && item->mask) { - tcp_mask = (const struct rte_flow_item_tcp *)item->mask; + tcp_mask = item->mask; /** * Only support src & dst ports, tcp flags, @@ -293,14 +270,14 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, return -rte_errno; } - tcp_spec = (const struct rte_flow_item_tcp *)item->spec; + tcp_spec = item->spec; filter->dst_port = tcp_spec->hdr.dst_port; filter->src_port = tcp_spec->hdr.src_port; filter->tcp_flags = tcp_spec->hdr.tcp_flags; } } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) { if (item->spec && item->mask) { - udp_mask = (const struct rte_flow_item_udp *)item->mask; + udp_mask = item->mask; /** * Only support src & dst ports, @@ -319,14 +296,13 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, filter->dst_port_mask = udp_mask->hdr.dst_port; filter->src_port_mask = udp_mask->hdr.src_port; - udp_spec = (const struct rte_flow_item_udp *)item->spec; + udp_spec = item->spec; filter->dst_port = udp_spec->hdr.dst_port; filter->src_port = udp_spec->hdr.src_port; } } else { if (item->spec && item->mask) { - sctp_mask = (const struct rte_flow_item_sctp *) - item->mask; + sctp_mask = item->mask; /** * Only support src & dst ports, @@ -410,6 +386,15 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, return -rte_errno; } + /* not supported */ + if (attr->transfer) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + if (attr->priority > 0xFFFF) { memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); rte_flow_error_set(error, EINVAL, @@ -563,16 +548,16 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr, return -rte_errno; } - eth_spec = (const struct rte_flow_item_eth *)item->spec; - eth_mask = (const struct rte_flow_item_eth *)item->mask; + eth_spec = item->spec; + eth_mask = item->mask; /* Mask bits of source MAC address must be full of 0. * Mask bits of destination MAC address must be full * of 1 or full of 0. */ - if (!is_zero_ether_addr(ð_mask->src) || - (!is_zero_ether_addr(ð_mask->dst) && - !is_broadcast_ether_addr(ð_mask->dst))) { + if (!rte_is_zero_ether_addr(ð_mask->src) || + (!rte_is_zero_ether_addr(ð_mask->dst) && + !rte_is_broadcast_ether_addr(ð_mask->dst))) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "Invalid ether address mask"); @@ -589,7 +574,7 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr, /* If mask bits of destination MAC address * are full of 1, set RTE_ETHTYPE_FLAGS_MAC. 
*/ - if (is_broadcast_ether_addr(ð_mask->dst)) { + if (rte_is_broadcast_ether_addr(ð_mask->dst)) { filter->mac_addr = eth_spec->dst; filter->flags |= RTE_ETHTYPE_FLAGS_MAC; } else { @@ -654,6 +639,14 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr, return -rte_errno; } + /* Not supported */ + if (attr->transfer) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + /* Not supported */ if (attr->priority) { rte_flow_error_set(error, EINVAL, @@ -714,8 +707,8 @@ igb_parse_ethertype_filter(struct rte_eth_dev *dev, } } - if (filter->ether_type == ETHER_TYPE_IPv4 || - filter->ether_type == ETHER_TYPE_IPv6) { + if (filter->ether_type == RTE_ETHER_TYPE_IPV4 || + filter->ether_type == RTE_ETHER_TYPE_IPV6) { memset(filter, 0, sizeof(struct rte_eth_ethertype_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -878,15 +871,15 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, return -rte_errno; } - tcp_spec = (const struct rte_flow_item_tcp *)item->spec; - tcp_mask = (const struct rte_flow_item_tcp *)item->mask; - if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) || + tcp_spec = item->spec; + tcp_mask = item->mask; + if (!(tcp_spec->hdr.tcp_flags & RTE_TCP_SYN_FLAG) || tcp_mask->hdr.src_port || tcp_mask->hdr.dst_port || tcp_mask->hdr.sent_seq || tcp_mask->hdr.recv_ack || tcp_mask->hdr.data_off || - tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG || + tcp_mask->hdr.tcp_flags != RTE_TCP_SYN_FLAG || tcp_mask->hdr.rx_win || tcp_mask->hdr.cksum || tcp_mask->hdr.tcp_urp) { @@ -954,6 +947,15 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, return -rte_errno; } + /* not supported */ + if (attr->transfer) { + memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + /* Support 2 priorities, the lowest or highest. */ if (!attr->priority) { filter->hig_pri = 0; @@ -1095,8 +1097,8 @@ item_loop: return -rte_errno; } - raw_spec = (const struct rte_flow_item_raw *)item->spec; - raw_mask = (const struct rte_flow_item_raw *)item->mask; + raw_spec = item->spec; + raw_mask = item->mask; if (!raw_mask->length || !raw_mask->relative) { @@ -1242,6 +1244,15 @@ item_loop: return -rte_errno; } + /* not supported */ + if (attr->transfer) { + memset(filter, 0, sizeof(struct rte_eth_flex_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + if (attr->priority > 0xFFFF) { memset(filter, 0, sizeof(struct rte_eth_flex_filter)); rte_flow_error_set(error, EINVAL, @@ -1296,6 +1307,126 @@ igb_parse_flex_filter(struct rte_eth_dev *dev, return 0; } +static int +igb_parse_rss_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_action actions[], + struct igb_rte_flow_rss_conf *rss_conf, + struct rte_flow_error *error) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + const struct rte_flow_action *act; + const struct rte_flow_action_rss *rss; + uint16_t n, index; + + /** + * rss only supports forwarding, + * check if the first not void action is RSS. 
+	 */
+	index = 0;
+	NEXT_ITEM_OF_ACTION(act, actions, index);
+	if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
+		memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION,
+				act, "Not supported action.");
+		return -rte_errno;
+	}
+
+	rss = (const struct rte_flow_action_rss *)act->conf;
+
+	if (!rss || !rss->queue_num) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION,
+				act,
+				"no valid queues");
+		return -rte_errno;
+	}
+
+	for (n = 0; n < rss->queue_num; n++) {
+		if (rss->queue[n] >= dev->data->nb_rx_queues) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION,
+				act,
+				"queue id > max number of queues");
+			return -rte_errno;
+		}
+	}
+
+	if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
+		return rte_flow_error_set
+			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+			 "non-default RSS hash functions are not supported");
+	if (rss->level)
+		return rte_flow_error_set
+			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+			 "a nonzero RSS encapsulation level is not supported");
+	if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
+		return rte_flow_error_set
+			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+			 "RSS hash key must be exactly 40 bytes");
+	if (((hw->mac.type == e1000_82576) &&
+	     (rss->queue_num > IGB_MAX_RX_QUEUE_NUM_82576)) ||
+	    ((hw->mac.type != e1000_82576) &&
+	     (rss->queue_num > IGB_MAX_RX_QUEUE_NUM)))
+		return rte_flow_error_set
+			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+			 "too many queues for RSS context");
+	if (igb_rss_conf_init(dev, rss_conf, rss))
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
+			 "RSS context initialization failure");
+
+	/* check if the next not void action is END */
+	index++;
+	NEXT_ITEM_OF_ACTION(act, actions, index);
+	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+		memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION,
+				act, "Not supported action.");
+		return -rte_errno;
+	}
+
+	/* parse attr */
+	/* must be input direction */
+	if (!attr->ingress) {
+		memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+				   attr, "Only support ingress.");
+		return -rte_errno;
+	}
+
+	/* not supported */
+	if (attr->egress) {
+		memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+				   attr, "Not support egress.");
+		return -rte_errno;
+	}
+
+	/* not supported */
+	if (attr->transfer) {
+		memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+				   attr, "No support for transfer.");
+		return -rte_errno;
+	}
+
+	if (attr->priority > 0xFFFF) {
+		memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+				   attr, "Error priority.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
 /**
  * Create a flow rule.
  * Theoretically one rule can match more than one filter.
@@ -1314,11 +1445,13 @@ igb_flow_create(struct rte_eth_dev *dev, struct rte_eth_ethertype_filter ethertype_filter; struct rte_eth_syn_filter syn_filter; struct rte_eth_flex_filter flex_filter; + struct igb_rte_flow_rss_conf rss_conf; struct rte_flow *flow = NULL; struct igb_ntuple_filter_ele *ntuple_filter_ptr; struct igb_ethertype_filter_ele *ethertype_filter_ptr; struct igb_eth_syn_filter_ele *syn_filter_ptr; struct igb_flex_filter_ele *flex_filter_ptr; + struct igb_rss_conf_ele *rss_filter_ptr; struct igb_flow_mem *igb_flow_mem_ptr; flow = rte_zmalloc("igb_rte_flow", sizeof(struct rte_flow), 0); @@ -1346,6 +1479,11 @@ igb_flow_create(struct rte_eth_dev *dev, if (!ret) { ntuple_filter_ptr = rte_zmalloc("igb_ntuple_filter", sizeof(struct igb_ntuple_filter_ele), 0); + if (!ntuple_filter_ptr) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + goto out; + } + rte_memcpy(&ntuple_filter_ptr->filter_info, &ntuple_filter, sizeof(struct rte_eth_ntuple_filter)); @@ -1368,6 +1506,11 @@ igb_flow_create(struct rte_eth_dev *dev, ethertype_filter_ptr = rte_zmalloc( "igb_ethertype_filter", sizeof(struct igb_ethertype_filter_ele), 0); + if (!ethertype_filter_ptr) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + goto out; + } + rte_memcpy(ðertype_filter_ptr->filter_info, ðertype_filter, sizeof(struct rte_eth_ethertype_filter)); @@ -1388,6 +1531,11 @@ igb_flow_create(struct rte_eth_dev *dev, if (!ret) { syn_filter_ptr = rte_zmalloc("igb_syn_filter", sizeof(struct igb_eth_syn_filter_ele), 0); + if (!syn_filter_ptr) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + goto out; + } + rte_memcpy(&syn_filter_ptr->filter_info, &syn_filter, sizeof(struct rte_eth_syn_filter)); @@ -1409,6 +1557,11 @@ igb_flow_create(struct rte_eth_dev *dev, if (!ret) { flex_filter_ptr = rte_zmalloc("igb_flex_filter", sizeof(struct igb_flex_filter_ele), 0); + if (!flex_filter_ptr) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + goto out; + } + rte_memcpy(&flex_filter_ptr->filter_info, &flex_filter, sizeof(struct rte_eth_flex_filter)); @@ -1420,6 +1573,28 @@ igb_flow_create(struct rte_eth_dev *dev, } } + memset(&rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf)); + ret = igb_parse_rss_filter(dev, attr, + actions, &rss_conf, error); + if (!ret) { + ret = igb_config_rss_filter(dev, &rss_conf, TRUE); + if (!ret) { + rss_filter_ptr = rte_zmalloc("igb_rss_filter", + sizeof(struct igb_rss_conf_ele), 0); + if (!rss_filter_ptr) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + goto out; + } + igb_rss_conf_init(dev, &rss_filter_ptr->filter_info, + &rss_conf.conf); + TAILQ_INSERT_TAIL(&igb_filter_rss_list, + rss_filter_ptr, entries); + flow->rule = rss_filter_ptr; + flow->filter_type = RTE_ETH_FILTER_HASH; + return flow; + } + } + out: TAILQ_REMOVE(&igb_flow_list, igb_flow_mem_ptr, entries); @@ -1447,6 +1622,7 @@ igb_flow_validate(__rte_unused struct rte_eth_dev *dev, struct rte_eth_ethertype_filter ethertype_filter; struct rte_eth_syn_filter syn_filter; struct rte_eth_flex_filter flex_filter; + struct igb_rte_flow_rss_conf rss_conf; int ret; memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter)); @@ -1470,6 +1646,12 @@ igb_flow_validate(__rte_unused struct rte_eth_dev *dev, memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter)); ret = igb_parse_flex_filter(dev, attr, pattern, actions, &flex_filter, error); + if (!ret) + return 0; + + memset(&rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf)); + ret = igb_parse_rss_filter(dev, attr, + actions, &rss_conf, error); return ret; } @@ -1488,6 +1670,7 @@ 
igb_flow_destroy(struct rte_eth_dev *dev,
 	struct igb_eth_syn_filter_ele *syn_filter_ptr;
 	struct igb_flex_filter_ele *flex_filter_ptr;
 	struct igb_flow_mem *igb_flow_mem_ptr;
+	struct igb_rss_conf_ele *rss_filter_ptr;
 
 	switch (filter_type) {
 	case RTE_ETH_FILTER_NTUPLE:
@@ -1534,6 +1717,17 @@ igb_flow_destroy(struct rte_eth_dev *dev,
 			rte_free(flex_filter_ptr);
 		}
 		break;
+	case RTE_ETH_FILTER_HASH:
+		rss_filter_ptr = (struct igb_rss_conf_ele *)
+				pmd_flow->rule;
+		ret = igb_config_rss_filter(dev,
+					&rss_filter_ptr->filter_info, FALSE);
+		if (!ret) {
+			TAILQ_REMOVE(&igb_filter_rss_list,
+				rss_filter_ptr, entries);
+			rte_free(rss_filter_ptr);
+		}
+		break;
 	default:
 		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
 			filter_type);
@@ -1622,6 +1816,17 @@ igb_clear_all_flex_filter(struct rte_eth_dev *dev)
 		igb_remove_flex_filter(dev, flex_filter);
 }
 
+/* remove the rss filter */
+static void
+igb_clear_rss_filter(struct rte_eth_dev *dev)
+{
+	struct e1000_filter_info *filter =
+		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+
+	if (filter->rss_info.conf.queue_num)
+		igb_config_rss_filter(dev, &filter->rss_info, FALSE);
+}
+
 void
 igb_filterlist_flush(struct rte_eth_dev *dev)
 {
@@ -1629,6 +1834,7 @@ igb_filterlist_flush(struct rte_eth_dev *dev)
 	struct igb_ethertype_filter_ele *ethertype_filter_ptr;
 	struct igb_eth_syn_filter_ele *syn_filter_ptr;
 	struct igb_flex_filter_ele *flex_filter_ptr;
+	struct igb_rss_conf_ele *rss_filter_ptr;
 	struct igb_flow_mem *igb_flow_mem_ptr;
 	enum rte_filter_type filter_type;
 	struct rte_flow *pmd_flow;
@@ -1671,6 +1877,14 @@ igb_filterlist_flush(struct rte_eth_dev *dev)
 				flex_filter_ptr, entries);
 			rte_free(flex_filter_ptr);
 			break;
+		case RTE_ETH_FILTER_HASH:
+			rss_filter_ptr =
+				(struct igb_rss_conf_ele *)
+					pmd_flow->rule;
+			TAILQ_REMOVE(&igb_filter_rss_list,
+				rss_filter_ptr, entries);
+			rte_free(rss_filter_ptr);
+			break;
 		default:
 			PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
 				    filter_type);
@@ -1694,6 +1908,7 @@ igb_flow_flush(struct rte_eth_dev *dev,
 	igb_clear_all_ethertype_filter(dev);
 	igb_clear_syn_filter(dev);
 	igb_clear_all_flex_filter(dev);
+	igb_clear_rss_filter(dev);
 	igb_filterlist_flush(dev);
 
 	return 0;
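
Reviewer note on the recurring one-line hunks above: the spec/mask/last members of struct rte_flow_item are declared const void *, and C converts const void * to any other const object pointer implicitly, so the explicit casts being dropped were redundant. A standalone illustration (the helper name is made up for this note, not taken from the patch):

#include <rte_flow.h>

/* Illustration only: rte_flow_item.mask is const void *, so assigning it
 * to a typed pointer needs no cast in C (a C++ compiler would require one). */
static const struct rte_flow_item_ipv4 *
ipv4_mask_of(const struct rte_flow_item *item)
{
	return item->mask;	/* implicit const void * -> typed pointer */
}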
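
The new RSS support is reached purely through the generic rte_flow API. The sketch below is not part of the patch; it shows one way an application could build a rule that igb_parse_rss_filter() accepts (the port, queue list, and helper name are hypothetical). It mirrors the parser's checks: ingress-only attributes, the default hash function, encapsulation level 0, a key of exactly 40 bytes or none at all, and a queue count within the per-MAC limits. The pattern holds only an END item because this path inspects just the attributes and action list.

#include <stdio.h>
#include <stdint.h>
#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

/* Hypothetical helper: spread Rx traffic over queues 0-3 on port_id.
 * Assumes the port is already configured with at least 4 Rx queues. */
static struct rte_flow *
igb_demo_rss_rule(uint16_t port_id)
{
	static const uint16_t queues[] = { 0, 1, 2, 3 };
	struct rte_flow_error err;
	struct rte_flow_attr attr = {
		.ingress = 1,	/* egress and transfer are rejected above */
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_END },	/* no pattern needed */
	};
	struct rte_flow_action_rss rss = {
		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,	/* only DEFAULT passes */
		.level = 0,				/* outer headers only */
		.types = ETH_RSS_IP | ETH_RSS_TCP,
		.key_len = 0,	/* 0 keeps the current key; else exactly 40 */
		.queue_num = RTE_DIM(queues),	/* <= 8, or <= 16 on 82576 */
		.queue = queues,
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) != 0) {
		printf("RSS rule rejected: %s\n",
		       err.message ? err.message : "(no message)");
		return NULL;
	}
	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}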
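
Teardown uses the same generic entry points: rte_flow_destroy() lands in the new RTE_ETH_FILTER_HASH case of igb_flow_destroy(), which disables the hardware RSS context via igb_config_rss_filter(dev, ..., FALSE) before freeing the list element, while rte_flow_flush() reaches igb_flow_flush(), which now also calls igb_clear_rss_filter(). A short sketch continuing the example above (flow and port_id come from the hypothetical helper):

	struct rte_flow_error err;

	/* Remove the single RSS rule; hardware state is cleared first. */
	if (flow != NULL && rte_flow_destroy(port_id, flow, &err) != 0)
		printf("destroy failed: %s\n", err.message ? err.message : "?");

	/* Or drop every flow rule on the port, the RSS one included. */
	if (rte_flow_flush(port_id, &err) != 0)
		printf("flush failed: %s\n", err.message ? err.message : "?");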