* END
 * other members in mask and spec should be set to 0x00.
* item->last should be NULL.
+ *
+ * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
+ *
*/
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
memset(ð_null, 0, sizeof(struct rte_flow_item_eth));
memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
+#ifdef RTE_LIB_SECURITY
+ /**
+ * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
+ */
+ act = next_no_void_action(actions, NULL);
+ if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
+ const void *conf = act->conf;
+ /* check if the next not void item is END */
+ act = next_no_void_action(actions, act);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* get the IP pattern */
+ item = next_no_void_pattern(pattern, NULL);
+ while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+ if (item->last ||
+ item->type == RTE_FLOW_ITEM_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "IP pattern missing.");
+ return -rte_errno;
+ }
+ item = next_no_void_pattern(pattern, item);
+ }
+
+ filter->proto = IPPROTO_ESP;
+ return txgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
+ item->type == RTE_FLOW_ITEM_TYPE_IPV6);
+ }
+#endif
+
/* the first not void item can be MAC or IPv4 */
item = next_no_void_pattern(pattern, NULL);
if (ret)
return ret;
+#ifdef RTE_LIB_SECURITY
+ /* ESP flow not really a flow */
+ if (filter->proto == IPPROTO_ESP)
+ return 0;
+#endif
+
/* txgbe doesn't support tcp flags */
if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
return 0;
}
+/*
+ * Remove the RSS filter from @dev if one is currently configured.
+ * A non-zero queue_num in the cached rss_info marks an active filter;
+ * FALSE asks txgbe_config_rss_filter() to remove rather than add it.
+ */
+static void
+txgbe_clear_rss_filter(struct rte_eth_dev *dev)
+{
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+
+ if (filter_info->rss_info.conf.queue_num)
+ txgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE);
+}
+
/* Initialize the global list (txgbe_flow_list) that tracks created flows. */
void
txgbe_filterlist_init(void)
{
	TAILQ_INIT(&txgbe_flow_list);
}
+/*
+ * Drain every software filter bookkeeping list (ntuple, ethertype, SYN,
+ * L2 tunnel, fdir, RSS) and the global flow list, rte_free()ing each node.
+ * NOTE(review): this releases driver-side state only; nothing here
+ * programs the hardware to remove the filters.
+ */
+void
+txgbe_filterlist_flush(void)
+{
+ struct txgbe_ntuple_filter_ele *ntuple_filter_ptr;
+ struct txgbe_ethertype_filter_ele *ethertype_filter_ptr;
+ struct txgbe_eth_syn_filter_ele *syn_filter_ptr;
+ struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
+ struct txgbe_fdir_rule_ele *fdir_rule_ptr;
+ struct txgbe_flow_mem *txgbe_flow_mem_ptr;
+ struct txgbe_rss_conf_ele *rss_filter_ptr;
+
+ while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
+ TAILQ_REMOVE(&filter_ntuple_list,
+ ntuple_filter_ptr,
+ entries);
+ rte_free(ntuple_filter_ptr);
+ }
+
+ while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
+ TAILQ_REMOVE(&filter_ethertype_list,
+ ethertype_filter_ptr,
+ entries);
+ rte_free(ethertype_filter_ptr);
+ }
+
+ while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
+ TAILQ_REMOVE(&filter_syn_list,
+ syn_filter_ptr,
+ entries);
+ rte_free(syn_filter_ptr);
+ }
+
+ while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
+ TAILQ_REMOVE(&filter_l2_tunnel_list,
+ l2_tn_filter_ptr,
+ entries);
+ rte_free(l2_tn_filter_ptr);
+ }
+
+ while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
+ TAILQ_REMOVE(&filter_fdir_list,
+ fdir_rule_ptr,
+ entries);
+ rte_free(fdir_rule_ptr);
+ }
+
+ while ((rss_filter_ptr = TAILQ_FIRST(&filter_rss_list))) {
+ TAILQ_REMOVE(&filter_rss_list,
+ rss_filter_ptr,
+ entries);
+ rte_free(rss_filter_ptr);
+ }
+
+ /* flow_mem nodes also own the rte_flow object they track */
+ while ((txgbe_flow_mem_ptr = TAILQ_FIRST(&txgbe_flow_list))) {
+ TAILQ_REMOVE(&txgbe_flow_list,
+ txgbe_flow_mem_ptr,
+ entries);
+ rte_free(txgbe_flow_mem_ptr->flow);
+ rte_free(txgbe_flow_mem_ptr);
+ }
+}
+
/**
* Create or destroy a flow rule.
 * Theoretically one rule can match more than one filters.
ret = txgbe_parse_ntuple_filter(dev, attr, pattern,
actions, &ntuple_filter, error);
+#ifdef RTE_LIB_SECURITY
+ /* ESP flow not really a flow */
+ if (ntuple_filter.proto == IPPROTO_ESP)
+ return flow;
+#endif
+
if (!ret) {
ret = txgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
if (!ret) {
struct rte_flow_error *error)
{
int ret = 0;
+ struct rte_flow *pmd_flow = flow;
+ enum rte_filter_type filter_type = pmd_flow->filter_type;
+ struct rte_eth_ntuple_filter ntuple_filter;
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_syn_filter syn_filter;
+ struct txgbe_fdir_rule fdir_rule;
+ struct txgbe_l2_tunnel_conf l2_tn_filter;
+ struct txgbe_ntuple_filter_ele *ntuple_filter_ptr;
+ struct txgbe_ethertype_filter_ele *ethertype_filter_ptr;
+ struct txgbe_eth_syn_filter_ele *syn_filter_ptr;
+ struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
+ struct txgbe_fdir_rule_ele *fdir_rule_ptr;
+ struct txgbe_flow_mem *txgbe_flow_mem_ptr;
+ struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(dev);
+ struct txgbe_rss_conf_ele *rss_filter_ptr;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_NTUPLE:
+ ntuple_filter_ptr = (struct txgbe_ntuple_filter_ele *)
+ pmd_flow->rule;
+ rte_memcpy(&ntuple_filter,
+ &ntuple_filter_ptr->filter_info,
+ sizeof(struct rte_eth_ntuple_filter));
+ ret = txgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_ntuple_list,
+ ntuple_filter_ptr, entries);
+ rte_free(ntuple_filter_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ethertype_filter_ptr = (struct txgbe_ethertype_filter_ele *)
+ pmd_flow->rule;
+ rte_memcpy(ðertype_filter,
+ ðertype_filter_ptr->filter_info,
+ sizeof(struct rte_eth_ethertype_filter));
+ ret = txgbe_add_del_ethertype_filter(dev,
+ ðertype_filter, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_ethertype_list,
+ ethertype_filter_ptr, entries);
+ rte_free(ethertype_filter_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_SYN:
+ syn_filter_ptr = (struct txgbe_eth_syn_filter_ele *)
+ pmd_flow->rule;
+ rte_memcpy(&syn_filter,
+ &syn_filter_ptr->filter_info,
+ sizeof(struct rte_eth_syn_filter));
+ ret = txgbe_syn_filter_set(dev, &syn_filter, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_syn_list,
+ syn_filter_ptr, entries);
+ rte_free(syn_filter_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ fdir_rule_ptr = (struct txgbe_fdir_rule_ele *)pmd_flow->rule;
+ rte_memcpy(&fdir_rule,
+ &fdir_rule_ptr->filter_info,
+ sizeof(struct txgbe_fdir_rule));
+ ret = txgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_fdir_list,
+ fdir_rule_ptr, entries);
+ rte_free(fdir_rule_ptr);
+ if (TAILQ_EMPTY(&filter_fdir_list))
+ fdir_info->mask_added = false;
+ }
+ break;
+ case RTE_ETH_FILTER_L2_TUNNEL:
+ l2_tn_filter_ptr = (struct txgbe_eth_l2_tunnel_conf_ele *)
+ pmd_flow->rule;
+ rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
+ sizeof(struct txgbe_l2_tunnel_conf));
+ ret = txgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_l2_tunnel_list,
+ l2_tn_filter_ptr, entries);
+ rte_free(l2_tn_filter_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_HASH:
+ rss_filter_ptr = (struct txgbe_rss_conf_ele *)
+ pmd_flow->rule;
+ ret = txgbe_config_rss_filter(dev,
+ &rss_filter_ptr->filter_info, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_rss_list,
+ rss_filter_ptr, entries);
+ rte_free(rss_filter_ptr);
+ }
+ break;
+ default:
+ PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+ filter_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failed to destroy flow");
+ return ret;
+ }
+
+ TAILQ_FOREACH(txgbe_flow_mem_ptr, &txgbe_flow_list, entries) {
+ if (txgbe_flow_mem_ptr->flow == pmd_flow) {
+ TAILQ_REMOVE(&txgbe_flow_list,
+ txgbe_flow_mem_ptr, entries);
+ rte_free(txgbe_flow_mem_ptr);
+ }
+ }
+ rte_free(flow);
return ret;
}
{
int ret = 0;
- return ret;
+ txgbe_clear_all_ntuple_filter(dev);
+ txgbe_clear_all_ethertype_filter(dev);
+ txgbe_clear_syn_filter(dev);
+
+ ret = txgbe_clear_all_fdir_filter(dev);
+ if (ret < 0) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failed to flush rule");
+ return ret;
+ }
+
+ ret = txgbe_clear_all_l2_tn_filter(dev);
+ if (ret < 0) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failed to flush rule");
+ return ret;
+ }
+
+ txgbe_clear_rss_filter(dev);
+
+ txgbe_filterlist_flush();
+
+ return 0;
}
const struct rte_flow_ops txgbe_flow_ops = {