diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
index 47bebb98b3..eae400b141 100644
--- a/drivers/net/txgbe/txgbe_flow.c
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -1,9 +1,11 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2020
+ * Copyright(c) 2015-2020 Beijing WangXun Technology Co., Ltd.
+ * Copyright(c) 2010-2017 Intel Corporation
  */
 
 #include 
 #include 
+#include 
 
 #include 
 #include 
@@ -13,6 +15,58 @@
 #define TXGBE_MAX_N_TUPLE_PRIO 7
 #define TXGBE_MAX_FLX_SOURCE_OFF 62
 
+/* ntuple filter list structure */
+struct txgbe_ntuple_filter_ele {
+	TAILQ_ENTRY(txgbe_ntuple_filter_ele) entries;
+	struct rte_eth_ntuple_filter filter_info;
+};
+/* ethertype filter list structure */
+struct txgbe_ethertype_filter_ele {
+	TAILQ_ENTRY(txgbe_ethertype_filter_ele) entries;
+	struct rte_eth_ethertype_filter filter_info;
+};
+/* syn filter list structure */
+struct txgbe_eth_syn_filter_ele {
+	TAILQ_ENTRY(txgbe_eth_syn_filter_ele) entries;
+	struct rte_eth_syn_filter filter_info;
+};
+/* fdir filter list structure */
+struct txgbe_fdir_rule_ele {
+	TAILQ_ENTRY(txgbe_fdir_rule_ele) entries;
+	struct txgbe_fdir_rule filter_info;
+};
+/* l2_tunnel filter list structure */
+struct txgbe_eth_l2_tunnel_conf_ele {
+	TAILQ_ENTRY(txgbe_eth_l2_tunnel_conf_ele) entries;
+	struct txgbe_l2_tunnel_conf filter_info;
+};
+/* rss filter list structure */
+struct txgbe_rss_conf_ele {
+	TAILQ_ENTRY(txgbe_rss_conf_ele) entries;
+	struct txgbe_rte_flow_rss_conf filter_info;
+};
+/* txgbe_flow memory list structure */
+struct txgbe_flow_mem {
+	TAILQ_ENTRY(txgbe_flow_mem) entries;
+	struct rte_flow *flow;
+};
+
+TAILQ_HEAD(txgbe_ntuple_filter_list, txgbe_ntuple_filter_ele);
+TAILQ_HEAD(txgbe_ethertype_filter_list, txgbe_ethertype_filter_ele);
+TAILQ_HEAD(txgbe_syn_filter_list, txgbe_eth_syn_filter_ele);
+TAILQ_HEAD(txgbe_fdir_rule_filter_list, txgbe_fdir_rule_ele);
+TAILQ_HEAD(txgbe_l2_tunnel_filter_list, txgbe_eth_l2_tunnel_conf_ele);
+TAILQ_HEAD(txgbe_rss_filter_list, txgbe_rss_conf_ele);
+TAILQ_HEAD(txgbe_flow_mem_list, txgbe_flow_mem);
+
+static struct txgbe_ntuple_filter_list filter_ntuple_list;
+static struct txgbe_ethertype_filter_list filter_ethertype_list;
+static struct txgbe_syn_filter_list filter_syn_list;
+static struct txgbe_fdir_rule_filter_list filter_fdir_list;
+static struct txgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
+static struct txgbe_rss_filter_list filter_rss_list;
+static struct txgbe_flow_mem_list txgbe_flow_list;
+
 /**
  * Endless loop will never happen with below assumption
  * 1. there is at least one no-void item(END)
@@ -76,6 +130,9 @@ const struct rte_flow_action *next_no_void_action(
  * END
  * other members in mask and spec should set to 0x00.
  * item->last should be NULL.
+ *
+ * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
+ *
  */
 static int
 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
@@ -124,6 +181,43 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 	memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
 	memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
 
+#ifdef RTE_LIB_SECURITY
+	/**
+	 * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
+	 */
+	act = next_no_void_action(actions, NULL);
+	if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
+		const void *conf = act->conf;
+		/* check if the next not void item is END */
+		act = next_no_void_action(actions, act);
+		if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION,
+				act, "Not supported action.");
+			return -rte_errno;
+		}
+
+		/* get the IP pattern */
+		item = next_no_void_pattern(pattern, NULL);
+		while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+				item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+			if (item->last ||
+					item->type == RTE_FLOW_ITEM_TYPE_END) {
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item, "IP pattern missing.");
+				return -rte_errno;
+			}
+			item = next_no_void_pattern(pattern, item);
+		}
+
+		filter->proto = IPPROTO_ESP;
+		return txgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
+					item->type == RTE_FLOW_ITEM_TYPE_IPV6);
+	}
+#endif
+
 	/* the first not void item can be MAC or IPv4 */
 	item = next_no_void_pattern(pattern, NULL);
 
@@ -147,11 +241,10 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 	/* if the first item is MAC, the content should be NULL */
-	if ((item->spec || item->mask) &&
-		(memcmp(eth_spec, &eth_null,
-			sizeof(struct rte_flow_item_eth)) ||
-		 memcmp(eth_mask, &eth_null,
-			sizeof(struct rte_flow_item_eth)))) {
+	if ((item->spec && memcmp(eth_spec, &eth_null,
+				sizeof(struct rte_flow_item_eth))) ||
+			(item->mask && memcmp(eth_mask, &eth_null,
+				sizeof(struct rte_flow_item_eth)))) {
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ITEM,
 			item, "Not supported by ntuple filter");
@@ -179,11 +272,10 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 	/* the content should be NULL */
-	if ((item->spec || item->mask) &&
-		(memcmp(vlan_spec, &vlan_null,
-			sizeof(struct rte_flow_item_vlan)) ||
-		 memcmp(vlan_mask, &vlan_null,
-			sizeof(struct rte_flow_item_vlan)))) {
+	if ((item->spec && memcmp(vlan_spec, &vlan_null,
+				sizeof(struct rte_flow_item_vlan))) ||
+			(item->mask && memcmp(vlan_mask, &vlan_null,
+				sizeof(struct rte_flow_item_vlan)))) {
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ITEM,
 			item, "Not supported by ntuple filter");
@@ -419,7 +511,7 @@ action:
 		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ACTION,
-			item, "Not supported action.");
+			act, "Not supported action.");
 		return -rte_errno;
 	}
 	filter->queue =
@@ -494,6 +586,12 @@ txgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
 	if (ret)
 		return ret;
 
+#ifdef RTE_LIB_SECURITY
+	/* an ESP flow is not really a flow */
+	if (filter->proto == IPPROTO_ESP)
+		return 0;
+#endif
+
 	/* txgbe doesn't support tcp flags */
 	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
 		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
@@ -2370,10 +2468,208 @@ step_next:
 	return ret;
 }
 
+static int
+txgbe_parse_rss_filter(struct rte_eth_dev *dev,
+			const struct rte_flow_attr *attr,
+			const struct rte_flow_action actions[],
+			struct txgbe_rte_flow_rss_conf *rss_conf,
+			struct rte_flow_error *error)
+{
+	const struct rte_flow_action *act;
+	const struct rte_flow_action_rss *rss;
+	uint16_t n;
+
+	/**
+	 * RSS only supports forwarding;
+	 * check if the first not void action is RSS.
+	 */
+	act = next_no_void_action(actions, NULL);
+	if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
+		memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION,
+			act, "Not supported action.");
+		return -rte_errno;
+	}
+
+	rss = (const struct rte_flow_action_rss *)act->conf;
+
+	if (!rss || !rss->queue_num) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION,
+				act,
+				"no valid queues");
+		return -rte_errno;
+	}
+
+	for (n = 0; n < rss->queue_num; n++) {
+		if (rss->queue[n] >= dev->data->nb_rx_queues) {
+			rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION,
+					act,
+					"queue id > max number of queues");
+			return -rte_errno;
+		}
+	}
+
+	if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
+		return rte_flow_error_set
+			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+			 "non-default RSS hash functions are not supported");
+	if (rss->level)
+		return rte_flow_error_set
+			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+			 "a nonzero RSS encapsulation level is not supported");
+	if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
+		return rte_flow_error_set
+			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+			 "RSS hash key must be exactly 40 bytes");
+	if (rss->queue_num > RTE_DIM(rss_conf->queue))
+		return rte_flow_error_set
+			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+			 "too many queues for RSS context");
+	if (txgbe_rss_conf_init(rss_conf, rss))
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
+			 "RSS context initialization failure");
+
+	/* check if the next not void item is END */
+	act = next_no_void_action(actions, act);
+	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+		memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION,
+			act, "Not supported action.");
+		return -rte_errno;
+	}
+
+	/* parse attr */
+	/* must be input direction */
+	if (!attr->ingress) {
+		memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+				attr, "Only ingress is supported.");
+		return -rte_errno;
+	}
+
+	/* not supported */
+	if (attr->egress) {
+		memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+				attr, "Egress is not supported.");
+		return -rte_errno;
+	}
+
+	/* not supported */
+	if (attr->transfer) {
+		memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+				attr, "Transfer is not supported.");
+		return -rte_errno;
+	}
+
+	if (attr->priority > 0xFFFF) {
+		memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+				attr, "Invalid priority.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+/* remove the RSS filter */
+static void
+txgbe_clear_rss_filter(struct rte_eth_dev *dev)
+{
+	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+
+	if (filter_info->rss_info.conf.queue_num)
+		txgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE);
+}
+
+void
+txgbe_filterlist_init(void)
+{
+	TAILQ_INIT(&filter_ntuple_list);
+	TAILQ_INIT(&filter_ethertype_list);
+	TAILQ_INIT(&filter_syn_list);
+	TAILQ_INIT(&filter_fdir_list);
+	TAILQ_INIT(&filter_l2_tunnel_list);
+	TAILQ_INIT(&filter_rss_list);
+	TAILQ_INIT(&txgbe_flow_list);
+}
+
+void
+txgbe_filterlist_flush(void)
+{
+	struct txgbe_ntuple_filter_ele *ntuple_filter_ptr;
+	struct txgbe_ethertype_filter_ele *ethertype_filter_ptr;
+	struct txgbe_eth_syn_filter_ele *syn_filter_ptr;
+	struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
+	struct txgbe_fdir_rule_ele *fdir_rule_ptr;
+	struct txgbe_flow_mem *txgbe_flow_mem_ptr;
+	struct txgbe_rss_conf_ele *rss_filter_ptr;
+
+	while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
+		TAILQ_REMOVE(&filter_ntuple_list,
+				ntuple_filter_ptr,
+				entries);
+		rte_free(ntuple_filter_ptr);
+	}
+
+	while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
+		TAILQ_REMOVE(&filter_ethertype_list,
+				ethertype_filter_ptr,
+				entries);
+		rte_free(ethertype_filter_ptr);
+	}
+
+	while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
+		TAILQ_REMOVE(&filter_syn_list,
+				syn_filter_ptr,
+				entries);
+		rte_free(syn_filter_ptr);
+	}
+
+	while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
+		TAILQ_REMOVE(&filter_l2_tunnel_list,
+				l2_tn_filter_ptr,
+				entries);
+		rte_free(l2_tn_filter_ptr);
+	}
+
+	while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
+		TAILQ_REMOVE(&filter_fdir_list,
+				fdir_rule_ptr,
+				entries);
+		rte_free(fdir_rule_ptr);
+	}
+
+	while ((rss_filter_ptr = TAILQ_FIRST(&filter_rss_list))) {
+		TAILQ_REMOVE(&filter_rss_list,
+				rss_filter_ptr,
+				entries);
+		rte_free(rss_filter_ptr);
+	}
+
+	while ((txgbe_flow_mem_ptr = TAILQ_FIRST(&txgbe_flow_list))) {
+		TAILQ_REMOVE(&txgbe_flow_list,
+				txgbe_flow_mem_ptr,
+				entries);
+		rte_free(txgbe_flow_mem_ptr->flow);
+		rte_free(txgbe_flow_mem_ptr);
+	}
+}
+
 /**
  * Create or destroy a flow rule.
- * Theorically one rule can match more than one filters.
- * We will let it use the filter which it hitt first.
+ * Theoretically one rule can match more than one filter.
+ * We will let it use the first filter that it hits.
  * So, the sequence matters.
  */
 static struct rte_flow *
@@ -2383,8 +2679,253 @@ txgbe_flow_create(struct rte_eth_dev *dev,
 		  const struct rte_flow_action actions[],
 		  struct rte_flow_error *error)
 {
+	int ret;
+	struct rte_eth_ntuple_filter ntuple_filter;
+	struct rte_eth_ethertype_filter ethertype_filter;
+	struct rte_eth_syn_filter syn_filter;
+	struct txgbe_fdir_rule fdir_rule;
+	struct txgbe_l2_tunnel_conf l2_tn_filter;
+	struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(dev);
+	struct txgbe_rte_flow_rss_conf rss_conf;
 	struct rte_flow *flow = NULL;
-	return flow;
+	struct txgbe_ntuple_filter_ele *ntuple_filter_ptr;
+	struct txgbe_ethertype_filter_ele *ethertype_filter_ptr;
+	struct txgbe_eth_syn_filter_ele *syn_filter_ptr;
+	struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
+	struct txgbe_fdir_rule_ele *fdir_rule_ptr;
+	struct txgbe_rss_conf_ele *rss_filter_ptr;
+	struct txgbe_flow_mem *txgbe_flow_mem_ptr;
+	uint8_t first_mask = FALSE;
+
+	flow = rte_zmalloc("txgbe_rte_flow", sizeof(struct rte_flow), 0);
+	if (!flow) {
+		PMD_DRV_LOG(ERR, "failed to allocate memory");
+		return (struct rte_flow *)flow;
+	}
+	txgbe_flow_mem_ptr = rte_zmalloc("txgbe_flow_mem",
+			sizeof(struct txgbe_flow_mem), 0);
+	if (!txgbe_flow_mem_ptr) {
+		PMD_DRV_LOG(ERR, "failed to allocate memory");
+		rte_free(flow);
+		return NULL;
+	}
+	txgbe_flow_mem_ptr->flow = flow;
+	TAILQ_INSERT_TAIL(&txgbe_flow_list,
+				txgbe_flow_mem_ptr, entries);
+
+	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+	ret = txgbe_parse_ntuple_filter(dev, attr, pattern,
+			actions, &ntuple_filter, error);
+
+#ifdef RTE_LIB_SECURITY
+	/* an ESP flow is not really a flow */
+	if (ntuple_filter.proto == IPPROTO_ESP)
+		return flow;
+#endif
+
+	if (!ret) {
+		ret = txgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
+		if (!ret) {
+			ntuple_filter_ptr = rte_zmalloc("txgbe_ntuple_filter",
+				sizeof(struct txgbe_ntuple_filter_ele), 0);
+			if (!ntuple_filter_ptr) {
+				PMD_DRV_LOG(ERR, "failed to allocate memory");
+				goto out;
+			}
+			rte_memcpy(&ntuple_filter_ptr->filter_info,
+				&ntuple_filter,
+				sizeof(struct rte_eth_ntuple_filter));
+			TAILQ_INSERT_TAIL(&filter_ntuple_list,
+				ntuple_filter_ptr, entries);
+			flow->rule = ntuple_filter_ptr;
+			flow->filter_type = RTE_ETH_FILTER_NTUPLE;
+			return flow;
+		}
+		goto out;
+	}
+
+	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+	ret = txgbe_parse_ethertype_filter(dev, attr, pattern,
+				actions, &ethertype_filter, error);
+	if (!ret) {
+		ret = txgbe_add_del_ethertype_filter(dev,
+				&ethertype_filter, TRUE);
+		if (!ret) {
+			ethertype_filter_ptr =
+				rte_zmalloc("txgbe_ethertype_filter",
+				sizeof(struct txgbe_ethertype_filter_ele), 0);
+			if (!ethertype_filter_ptr) {
+				PMD_DRV_LOG(ERR, "failed to allocate memory");
+				goto out;
+			}
+			rte_memcpy(&ethertype_filter_ptr->filter_info,
+				&ethertype_filter,
+				sizeof(struct rte_eth_ethertype_filter));
+			TAILQ_INSERT_TAIL(&filter_ethertype_list,
+				ethertype_filter_ptr, entries);
+			flow->rule = ethertype_filter_ptr;
+			flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
+			return flow;
+		}
+		goto out;
+	}
+
+	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
+	ret = txgbe_parse_syn_filter(dev, attr, pattern,
+				actions, &syn_filter, error);
+	if (!ret) {
+		ret = txgbe_syn_filter_set(dev, &syn_filter, TRUE);
+		if (!ret) {
+			syn_filter_ptr = rte_zmalloc("txgbe_syn_filter",
+				sizeof(struct txgbe_eth_syn_filter_ele), 0);
+			if (!syn_filter_ptr) {
+				PMD_DRV_LOG(ERR, "failed to allocate memory");
+				goto out;
+			}
+			rte_memcpy(&syn_filter_ptr->filter_info,
+				&syn_filter,
+				sizeof(struct rte_eth_syn_filter));
+			TAILQ_INSERT_TAIL(&filter_syn_list,
+				syn_filter_ptr,
+				entries);
+			flow->rule = syn_filter_ptr;
+			flow->filter_type = RTE_ETH_FILTER_SYN;
+			return flow;
+		}
+		goto out;
+	}
+
+	memset(&fdir_rule, 0, sizeof(struct txgbe_fdir_rule));
+	ret = txgbe_parse_fdir_filter(dev, attr, pattern,
+				actions, &fdir_rule, error);
+	if (!ret) {
+		/* A mask cannot be deleted. */
+		if (fdir_rule.b_mask) {
+			if (!fdir_info->mask_added) {
+				/* It's the first time the mask is set. */
+				rte_memcpy(&fdir_info->mask,
+					&fdir_rule.mask,
+					sizeof(struct txgbe_hw_fdir_mask));
+				fdir_info->flex_bytes_offset =
+					fdir_rule.flex_bytes_offset;
+
+				if (fdir_rule.mask.flex_bytes_mask)
+					txgbe_fdir_set_flexbytes_offset(dev,
+						fdir_rule.flex_bytes_offset);
+
+				ret = txgbe_fdir_set_input_mask(dev);
+				if (ret)
+					goto out;
+
+				fdir_info->mask_added = TRUE;
+				first_mask = TRUE;
+			} else {
+				/**
+				 * Only one global mask is supported;
+				 * all the masks should be the same.
+				 */
+				ret = memcmp(&fdir_info->mask,
+					&fdir_rule.mask,
+					sizeof(struct txgbe_hw_fdir_mask));
+				if (ret)
+					goto out;
+
+				if (fdir_info->flex_bytes_offset !=
+						fdir_rule.flex_bytes_offset)
+					goto out;
+			}
+		}
+
+		if (fdir_rule.b_spec) {
+			ret = txgbe_fdir_filter_program(dev, &fdir_rule,
+					FALSE, FALSE);
+			if (!ret) {
+				fdir_rule_ptr = rte_zmalloc("txgbe_fdir_filter",
+					sizeof(struct txgbe_fdir_rule_ele), 0);
+				if (!fdir_rule_ptr) {
+					PMD_DRV_LOG(ERR,
+						"failed to allocate memory");
+					goto out;
+				}
+				rte_memcpy(&fdir_rule_ptr->filter_info,
+					&fdir_rule,
+					sizeof(struct txgbe_fdir_rule));
+				TAILQ_INSERT_TAIL(&filter_fdir_list,
+					fdir_rule_ptr, entries);
+				flow->rule = fdir_rule_ptr;
+				flow->filter_type = RTE_ETH_FILTER_FDIR;
+
+				return flow;
+			}
+
+			if (ret) {
+				/**
+				 * Clear the mask_added flag if the rule
+				 * failed to program.
+				 */
+				if (first_mask)
+					fdir_info->mask_added = FALSE;
+				goto out;
+			}
+		}
+
+		goto out;
+	}
+
+	memset(&l2_tn_filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
+	ret = txgbe_parse_l2_tn_filter(dev, attr, pattern,
+					actions, &l2_tn_filter, error);
+	if (!ret) {
+		ret = txgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
+		if (!ret) {
+			l2_tn_filter_ptr = rte_zmalloc("txgbe_l2_tn_filter",
+				sizeof(struct txgbe_eth_l2_tunnel_conf_ele), 0);
+			if (!l2_tn_filter_ptr) {
+				PMD_DRV_LOG(ERR, "failed to allocate memory");
+				goto out;
+			}
+			rte_memcpy(&l2_tn_filter_ptr->filter_info,
+				&l2_tn_filter,
+				sizeof(struct txgbe_l2_tunnel_conf));
+			TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
+				l2_tn_filter_ptr, entries);
+			flow->rule = l2_tn_filter_ptr;
+			flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
+			return flow;
+		}
+	}
+
+	memset(&rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
+	ret = txgbe_parse_rss_filter(dev, attr,
+					actions, &rss_conf, error);
+	if (!ret) {
+		ret = txgbe_config_rss_filter(dev, &rss_conf, TRUE);
+		if (!ret) {
+			rss_filter_ptr = rte_zmalloc("txgbe_rss_filter",
+				sizeof(struct txgbe_rss_conf_ele), 0);
+			if (!rss_filter_ptr) {
+				PMD_DRV_LOG(ERR, "failed to allocate memory");
+				goto out;
+			}
+			txgbe_rss_conf_init(&rss_filter_ptr->filter_info,
+					&rss_conf.conf);
+			TAILQ_INSERT_TAIL(&filter_rss_list,
+				rss_filter_ptr, entries);
+			flow->rule = rss_filter_ptr;
+			flow->filter_type = RTE_ETH_FILTER_HASH;
+			return flow;
+		}
+	}
+
+out:
+	TAILQ_REMOVE(&txgbe_flow_list,
+		txgbe_flow_mem_ptr, entries);
+	rte_flow_error_set(error, -ret,
+			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+			"Failed to create flow.");
+	rte_free(txgbe_flow_mem_ptr);
+	rte_free(flow);
+	return NULL;
 }
 
 /**
@@ -2404,6 +2945,7 @@ txgbe_flow_validate(struct rte_eth_dev *dev,
 	struct rte_eth_syn_filter syn_filter;
 	struct txgbe_l2_tunnel_conf l2_tn_filter;
 	struct txgbe_fdir_rule fdir_rule;
+	struct txgbe_rte_flow_rss_conf rss_conf;
 	int ret = 0;
 
 	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
@@ -2436,6 +2978,10 @@ txgbe_flow_validate(struct rte_eth_dev *dev,
 	if (!ret)
 		return 0;
 
+	memset(&rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
+	ret = txgbe_parse_rss_filter(dev, attr,
+				actions, &rss_conf, error);
+
 	return ret;
 }
 
@@ -2446,6 +2992,122 @@ txgbe_flow_destroy(struct rte_eth_dev *dev,
 		   struct rte_flow_error *error)
 {
 	int ret = 0;
+	struct rte_flow *pmd_flow = flow;
+	enum rte_filter_type filter_type = pmd_flow->filter_type;
+	struct rte_eth_ntuple_filter ntuple_filter;
+	struct rte_eth_ethertype_filter ethertype_filter;
+	struct rte_eth_syn_filter syn_filter;
+	struct txgbe_fdir_rule fdir_rule;
+	struct txgbe_l2_tunnel_conf l2_tn_filter;
+	struct txgbe_ntuple_filter_ele *ntuple_filter_ptr;
+	struct txgbe_ethertype_filter_ele *ethertype_filter_ptr;
+	struct txgbe_eth_syn_filter_ele *syn_filter_ptr;
+	struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
+	struct txgbe_fdir_rule_ele *fdir_rule_ptr;
+	struct txgbe_flow_mem *txgbe_flow_mem_ptr;
+	struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(dev);
+	struct txgbe_rss_conf_ele *rss_filter_ptr;
+
+	switch (filter_type) {
+	case RTE_ETH_FILTER_NTUPLE:
+		ntuple_filter_ptr = (struct txgbe_ntuple_filter_ele *)
+					pmd_flow->rule;
+		rte_memcpy(&ntuple_filter,
+			&ntuple_filter_ptr->filter_info,
+			sizeof(struct rte_eth_ntuple_filter));
+		ret = txgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
+		if (!ret) {
+			TAILQ_REMOVE(&filter_ntuple_list,
+				ntuple_filter_ptr, entries);
+			rte_free(ntuple_filter_ptr);
+		}
+		break;
+	case RTE_ETH_FILTER_ETHERTYPE:
+		ethertype_filter_ptr = (struct txgbe_ethertype_filter_ele *)
+					pmd_flow->rule;
+		rte_memcpy(&ethertype_filter,
+			&ethertype_filter_ptr->filter_info,
+			sizeof(struct rte_eth_ethertype_filter));
+		ret = txgbe_add_del_ethertype_filter(dev,
+				&ethertype_filter, FALSE);
+		if (!ret) {
+			TAILQ_REMOVE(&filter_ethertype_list,
+				ethertype_filter_ptr, entries);
+			rte_free(ethertype_filter_ptr);
+		}
+		break;
+	case RTE_ETH_FILTER_SYN:
+		syn_filter_ptr = (struct txgbe_eth_syn_filter_ele *)
+				pmd_flow->rule;
+		rte_memcpy(&syn_filter,
+			&syn_filter_ptr->filter_info,
+			sizeof(struct rte_eth_syn_filter));
+		ret = txgbe_syn_filter_set(dev, &syn_filter, FALSE);
+		if (!ret) {
+			TAILQ_REMOVE(&filter_syn_list,
+				syn_filter_ptr, entries);
+			rte_free(syn_filter_ptr);
+		}
+		break;
+	case RTE_ETH_FILTER_FDIR:
+		fdir_rule_ptr = (struct txgbe_fdir_rule_ele *)pmd_flow->rule;
+		rte_memcpy(&fdir_rule,
+			&fdir_rule_ptr->filter_info,
+			sizeof(struct txgbe_fdir_rule));
+		ret = txgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
+		if (!ret) {
+			TAILQ_REMOVE(&filter_fdir_list,
+				fdir_rule_ptr, entries);
+			rte_free(fdir_rule_ptr);
+			if (TAILQ_EMPTY(&filter_fdir_list))
+				fdir_info->mask_added = false;
+		}
+		break;
+	case RTE_ETH_FILTER_L2_TUNNEL:
+		l2_tn_filter_ptr = (struct txgbe_eth_l2_tunnel_conf_ele *)
+				pmd_flow->rule;
+		rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
+			sizeof(struct txgbe_l2_tunnel_conf));
+		ret = txgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
+		if (!ret) {
+			TAILQ_REMOVE(&filter_l2_tunnel_list,
+				l2_tn_filter_ptr, entries);
+			rte_free(l2_tn_filter_ptr);
+		}
+		break;
+	case RTE_ETH_FILTER_HASH:
+		rss_filter_ptr = (struct txgbe_rss_conf_ele *)
+				pmd_flow->rule;
+		ret = txgbe_config_rss_filter(dev,
+				&rss_filter_ptr->filter_info, FALSE);
+		if (!ret) {
+			TAILQ_REMOVE(&filter_rss_list,
+				rss_filter_ptr, entries);
+			rte_free(rss_filter_ptr);
+		}
+		break;
+	default:
+		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+			filter_type);
+		ret = -EINVAL;
+		break;
+	}
+
+	if (ret) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_HANDLE,
+				NULL, "Failed to destroy flow");
+		return ret;
+	}
+
+	TAILQ_FOREACH(txgbe_flow_mem_ptr, &txgbe_flow_list, entries) {
+		if (txgbe_flow_mem_ptr->flow == pmd_flow) {
+			TAILQ_REMOVE(&txgbe_flow_list,
+				txgbe_flow_mem_ptr, entries);
+			rte_free(txgbe_flow_mem_ptr);
+		}
+	}
+	rte_free(flow);
 
 	return ret;
 }
@@ -2457,7 +3119,29 @@ txgbe_flow_flush(struct rte_eth_dev *dev,
 {
 	int ret = 0;
 
-	return ret;
+	txgbe_clear_all_ntuple_filter(dev);
+	txgbe_clear_all_ethertype_filter(dev);
+	txgbe_clear_syn_filter(dev);
+
+	ret = txgbe_clear_all_fdir_filter(dev);
+	if (ret < 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
+					NULL, "Failed to flush rule");
+		return ret;
+	}
+
+	ret = txgbe_clear_all_l2_tn_filter(dev);
+	if (ret < 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
+					NULL, "Failed to flush rule");
+		return ret;
+	}
+
+	txgbe_clear_rss_filter(dev);
+
+	txgbe_filterlist_flush();
+
+	return 0;
}
 
 const struct rte_flow_ops txgbe_flow_ops = {
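
Editorial usage note (not part of the patch): with this change, txgbe flow rules are driven entirely through the generic rte_flow API; there is no driver-private entry point. The sketch below shows one way an application could exercise the new RSS path that txgbe_parse_rss_filter() accepts: ingress-only attributes, a single RSS action followed by END, the default hash function, level 0, and either no key or an exactly 40-byte key. The port id, queue ids, and the helper name example_create_rss_flow are illustrative assumptions, and the ETH_RSS_* macros are those of the DPDK 20.11-era API this patch targets.

#include <rte_ethdev.h>
#include <rte_flow.h>

/* Hypothetical helper: spread IP/TCP traffic on one port over two queues. */
static struct rte_flow *
example_create_rss_flow(uint16_t port_id, struct rte_flow_error *err)
{
	static const uint16_t queues[] = { 0, 1 };
	/* Ingress only: txgbe_parse_rss_filter() rejects egress/transfer. */
	struct rte_flow_attr attr = { .ingress = 1 };
	/* The RSS parser looks only at attributes and actions, so an
	 * empty pattern (just END) is sufficient.
	 */
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_rss rss = {
		.func = RTE_ETH_HASH_FUNCTION_DEFAULT, /* only DEFAULT passes */
		.level = 0,                     /* outer headers only */
		.types = ETH_RSS_IP | ETH_RSS_TCP,
		.key_len = 0,   /* keep default key; a key must be 40 bytes */
		.key = NULL,
		.queue_num = RTE_DIM(queues),
		.queue = queues,
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* Validation walks the same txgbe_parse_rss_filter() path added
	 * above before any hardware state is touched.
	 */
	if (rte_flow_validate(port_id, &attr, pattern, actions, err))
		return NULL;
	return rte_flow_create(port_id, &attr, pattern, actions, err);
}

On success, txgbe_flow_create() stores the rule on filter_rss_list and tags the returned handle RTE_ETH_FILTER_HASH, so rte_flow_destroy() on that handle takes the corresponding branch of the switch above.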